diff --git a/.ci/all_requirements.txt b/.ci/all_requirements.txt index ac9682a09bec1..4918d7519291f 100644 --- a/.ci/all_requirements.txt +++ b/.ci/all_requirements.txt @@ -12,6 +12,94 @@ certifi==2025.8.3 \ --hash=sha256:e564105f78ded564e3ae7c923924435e1daa7463faeab5bb932bc53ffae63407 \ --hash=sha256:f6c12493cfb1b06ba2ff328595af9350c65d6644968e5d3a2ffd78699af217a5 # via requests +cffi==2.0.0 \ + --hash=sha256:00bdf7acc5f795150faa6957054fbbca2439db2f775ce831222b66f192f03beb \ + --hash=sha256:07b271772c100085dd28b74fa0cd81c8fb1a3ba18b21e03d7c27f3436a10606b \ + --hash=sha256:087067fa8953339c723661eda6b54bc98c5625757ea62e95eb4898ad5e776e9f \ + --hash=sha256:0a1527a803f0a659de1af2e1fd700213caba79377e27e4693648c2923da066f9 \ + --hash=sha256:0cf2d91ecc3fcc0625c2c530fe004f82c110405f101548512cce44322fa8ac44 \ + --hash=sha256:0f6084a0ea23d05d20c3edcda20c3d006f9b6f3fefeac38f59262e10cef47ee2 \ + --hash=sha256:12873ca6cb9b0f0d3a0da705d6086fe911591737a59f28b7936bdfed27c0d47c \ + --hash=sha256:19f705ada2530c1167abacb171925dd886168931e0a7b78f5bffcae5c6b5be75 \ + --hash=sha256:1cd13c99ce269b3ed80b417dcd591415d3372bcac067009b6e0f59c7d4015e65 \ + --hash=sha256:1e3a615586f05fc4065a8b22b8152f0c1b00cdbc60596d187c2a74f9e3036e4e \ + --hash=sha256:1f72fb8906754ac8a2cc3f9f5aaa298070652a0ffae577e0ea9bd480dc3c931a \ + --hash=sha256:1fc9ea04857caf665289b7a75923f2c6ed559b8298a1b8c49e59f7dd95c8481e \ + --hash=sha256:203a48d1fb583fc7d78a4c6655692963b860a417c0528492a6bc21f1aaefab25 \ + --hash=sha256:2081580ebb843f759b9f617314a24ed5738c51d2aee65d31e02f6f7a2b97707a \ + --hash=sha256:21d1152871b019407d8ac3985f6775c079416c282e431a4da6afe7aefd2bccbe \ + --hash=sha256:24b6f81f1983e6df8db3adc38562c83f7d4a0c36162885ec7f7b77c7dcbec97b \ + --hash=sha256:256f80b80ca3853f90c21b23ee78cd008713787b1b1e93eae9f3d6a7134abd91 \ + --hash=sha256:28a3a209b96630bca57cce802da70c266eb08c6e97e5afd61a75611ee6c64592 \ + --hash=sha256:2c8f814d84194c9ea681642fd164267891702542f028a15fc97d4674b6206187 \ + --hash=sha256:2de9a304e27f7596cd03d16f1b7c72219bd944e99cc52b84d0145aefb07cbd3c \ + --hash=sha256:38100abb9d1b1435bc4cc340bb4489635dc2f0da7456590877030c9b3d40b0c1 \ + --hash=sha256:3925dd22fa2b7699ed2617149842d2e6adde22b262fcbfada50e3d195e4b3a94 \ + --hash=sha256:3e17ed538242334bf70832644a32a7aae3d83b57567f9fd60a26257e992b79ba \ + --hash=sha256:3e837e369566884707ddaf85fc1744b47575005c0a229de3327f8f9a20f4efeb \ + --hash=sha256:3f4d46d8b35698056ec29bca21546e1551a205058ae1a181d871e278b0b28165 \ + --hash=sha256:44d1b5909021139fe36001ae048dbdde8214afa20200eda0f64c068cac5d5529 \ + --hash=sha256:45d5e886156860dc35862657e1494b9bae8dfa63bf56796f2fb56e1679fc0bca \ + --hash=sha256:4647afc2f90d1ddd33441e5b0e85b16b12ddec4fca55f0d9671fef036ecca27c \ + --hash=sha256:4671d9dd5ec934cb9a73e7ee9676f9362aba54f7f34910956b84d727b0d73fb6 \ + --hash=sha256:53f77cbe57044e88bbd5ed26ac1d0514d2acf0591dd6bb02a3ae37f76811b80c \ + --hash=sha256:5eda85d6d1879e692d546a078b44251cdd08dd1cfb98dfb77b670c97cee49ea0 \ + --hash=sha256:5fed36fccc0612a53f1d4d9a816b50a36702c28a2aa880cb8a122b3466638743 \ + --hash=sha256:61d028e90346df14fedc3d1e5441df818d095f3b87d286825dfcbd6459b7ef63 \ + --hash=sha256:66f011380d0e49ed280c789fbd08ff0d40968ee7b665575489afa95c98196ab5 \ + --hash=sha256:6824f87845e3396029f3820c206e459ccc91760e8fa24422f8b0c3d1731cbec5 \ + --hash=sha256:6c6c373cfc5c83a975506110d17457138c8c63016b563cc9ed6e056a82f13ce4 \ + --hash=sha256:6d02d6655b0e54f54c4ef0b94eb6be0607b70853c45ce98bd278dc7de718be5d \ + --hash=sha256:6d50360be4546678fc1b79ffe7a66265e28667840010348dd69a314145807a1b \ + 
--hash=sha256:730cacb21e1bdff3ce90babf007d0a0917cc3e6492f336c2f0134101e0944f93 \ + --hash=sha256:737fe7d37e1a1bffe70bd5754ea763a62a066dc5913ca57e957824b72a85e205 \ + --hash=sha256:74a03b9698e198d47562765773b4a8309919089150a0bb17d829ad7b44b60d27 \ + --hash=sha256:7553fb2090d71822f02c629afe6042c299edf91ba1bf94951165613553984512 \ + --hash=sha256:7a66c7204d8869299919db4d5069a82f1561581af12b11b3c9f48c584eb8743d \ + --hash=sha256:7cc09976e8b56f8cebd752f7113ad07752461f48a58cbba644139015ac24954c \ + --hash=sha256:81afed14892743bbe14dacb9e36d9e0e504cd204e0b165062c488942b9718037 \ + --hash=sha256:8941aaadaf67246224cee8c3803777eed332a19d909b47e29c9842ef1e79ac26 \ + --hash=sha256:89472c9762729b5ae1ad974b777416bfda4ac5642423fa93bd57a09204712322 \ + --hash=sha256:8ea985900c5c95ce9db1745f7933eeef5d314f0565b27625d9a10ec9881e1bfb \ + --hash=sha256:8eca2a813c1cb7ad4fb74d368c2ffbbb4789d377ee5bb8df98373c2cc0dee76c \ + --hash=sha256:92b68146a71df78564e4ef48af17551a5ddd142e5190cdf2c5624d0c3ff5b2e8 \ + --hash=sha256:9332088d75dc3241c702d852d4671613136d90fa6881da7d770a483fd05248b4 \ + --hash=sha256:94698a9c5f91f9d138526b48fe26a199609544591f859c870d477351dc7b2414 \ + --hash=sha256:9a67fc9e8eb39039280526379fb3a70023d77caec1852002b4da7e8b270c4dd9 \ + --hash=sha256:9de40a7b0323d889cf8d23d1ef214f565ab154443c42737dfe52ff82cf857664 \ + --hash=sha256:a05d0c237b3349096d3981b727493e22147f934b20f6f125a3eba8f994bec4a9 \ + --hash=sha256:afb8db5439b81cf9c9d0c80404b60c3cc9c3add93e114dcae767f1477cb53775 \ + --hash=sha256:b18a3ed7d5b3bd8d9ef7a8cb226502c6bf8308df1525e1cc676c3680e7176739 \ + --hash=sha256:b1e74d11748e7e98e2f426ab176d4ed720a64412b6a15054378afdb71e0f37dc \ + --hash=sha256:b21e08af67b8a103c71a250401c78d5e0893beff75e28c53c98f4de42f774062 \ + --hash=sha256:b4c854ef3adc177950a8dfc81a86f5115d2abd545751a304c5bcf2c2c7283cfe \ + --hash=sha256:b882b3df248017dba09d6b16defe9b5c407fe32fc7c65a9c69798e6175601be9 \ + --hash=sha256:baf5215e0ab74c16e2dd324e8ec067ef59e41125d3eade2b863d294fd5035c92 \ + --hash=sha256:c649e3a33450ec82378822b3dad03cc228b8f5963c0c12fc3b1e0ab940f768a5 \ + --hash=sha256:c654de545946e0db659b3400168c9ad31b5d29593291482c43e3564effbcee13 \ + --hash=sha256:c6638687455baf640e37344fe26d37c404db8b80d037c3d29f58fe8d1c3b194d \ + --hash=sha256:c8d3b5532fc71b7a77c09192b4a5a200ea992702734a2e9279a37f2478236f26 \ + --hash=sha256:cb527a79772e5ef98fb1d700678fe031e353e765d1ca2d409c92263c6d43e09f \ + --hash=sha256:cf364028c016c03078a23b503f02058f1814320a56ad535686f90565636a9495 \ + --hash=sha256:d48a880098c96020b02d5a1f7d9251308510ce8858940e6fa99ece33f610838b \ + --hash=sha256:d68b6cef7827e8641e8ef16f4494edda8b36104d79773a334beaa1e3521430f6 \ + --hash=sha256:d9b29c1f0ae438d5ee9acb31cadee00a58c46cc9c0b2f9038c6b0b3470877a8c \ + --hash=sha256:d9b97165e8aed9272a6bb17c01e3cc5871a594a446ebedc996e2397a1c1ea8ef \ + --hash=sha256:da68248800ad6320861f129cd9c1bf96ca849a2771a59e0344e88681905916f5 \ + --hash=sha256:da902562c3e9c550df360bfa53c035b2f241fed6d9aef119048073680ace4a18 \ + --hash=sha256:dbd5c7a25a7cb98f5ca55d258b103a2054f859a46ae11aaf23134f9cc0d356ad \ + --hash=sha256:dd4f05f54a52fb558f1ba9f528228066954fee3ebe629fc1660d874d040ae5a3 \ + --hash=sha256:de8dad4425a6ca6e4e5e297b27b5c824ecc7581910bf9aee86cb6835e6812aa7 \ + --hash=sha256:e11e82b744887154b182fd3e7e8512418446501191994dbf9c9fc1f32cc8efd5 \ + --hash=sha256:e6e73b9e02893c764e7e8d5bb5ce277f1a009cd5243f8228f75f842bf937c534 \ + --hash=sha256:f73b96c41e3b2adedc34a7356e64c8eb96e03a3782b535e043a986276ce12a49 \ + 
--hash=sha256:f93fd8e5c8c0a4aa1f424d6173f14a892044054871c771f8566e4008eaa359d2 \ + --hash=sha256:fc33c5141b55ed366cfaad382df24fe7dcbc686de5be719b207bb248e3053dc5 \ + --hash=sha256:fc7de24befaeae77ba923797c7c87834c73648a05a4bde34b3b7e5588973a453 \ + --hash=sha256:fe562eb1a64e67dd297ccc4f5addea2501664954f2692b69a76449ec7913ecbf + # via + # cryptography + # pynacl charset-normalizer==3.4.3 \ --hash=sha256:00237675befef519d9af72169d8604a067d92755e84fe76492fef5441db05b91 \ --hash=sha256:02425242e96bcf29a49711b0ca9f37e451da7c70562bc10e8ed992a5a7a25cc0 \ @@ -93,6 +181,62 @@ charset-normalizer==3.4.3 \ --hash=sha256:fd10de089bcdcd1be95a2f73dbe6254798ec1bda9f450d5828c96f93e2536b9c \ --hash=sha256:fdabf8315679312cfa71302f9bd509ded4f2f263fb5b765cf1433b39106c3cc9 # via requests +cryptography==46.0.3 \ + --hash=sha256:00a5e7e87938e5ff9ff5447ab086a5706a957137e6e433841e9d24f38a065217 \ + --hash=sha256:01ca9ff2885f3acc98c29f1860552e37f6d7c7d013d7334ff2a9de43a449315d \ + --hash=sha256:09859af8466b69bc3c27bdf4f5d84a665e0f7ab5088412e9e2ec49758eca5cbc \ + --hash=sha256:0abf1ffd6e57c67e92af68330d05760b7b7efb243aab8377e583284dbab72c71 \ + --hash=sha256:1000713389b75c449a6e979ffc7dcc8ac90b437048766cef052d4d30b8220971 \ + --hash=sha256:109d4ddfadf17e8e7779c39f9b18111a09efb969a301a31e987416a0191ed93a \ + --hash=sha256:10b01676fc208c3e6feeb25a8b83d81767e8059e1fe86e1dc62d10a3018fa926 \ + --hash=sha256:10ca84c4668d066a9878890047f03546f3ae0a6b8b39b697457b7757aaf18dbc \ + --hash=sha256:15ab9b093e8f09daab0f2159bb7e47532596075139dd74365da52ecc9cb46c5d \ + --hash=sha256:191bb60a7be5e6f54e30ba16fdfae78ad3a342a0599eb4193ba88e3f3d6e185b \ + --hash=sha256:22d7e97932f511d6b0b04f2bfd818d73dcd5928db509460aaf48384778eb6d20 \ + --hash=sha256:23b1a8f26e43f47ceb6d6a43115f33a5a37d57df4ea0ca295b780ae8546e8044 \ + --hash=sha256:36e627112085bb3b81b19fed209c05ce2a52ee8b15d161b7c643a7d5a88491f3 \ + --hash=sha256:39b6755623145ad5eff1dab323f4eae2a32a77a7abef2c5089a04a3d04366715 \ + --hash=sha256:3b51b8ca4f1c6453d8829e1eb7299499ca7f313900dd4d89a24b8b87c0a780d4 \ + --hash=sha256:402b58fc32614f00980b66d6e56a5b4118e6cb362ae8f3fda141ba4689bd4506 \ + --hash=sha256:416260257577718c05135c55958b674000baef9a1c7d9e8f306ec60d71db850f \ + --hash=sha256:46acf53b40ea38f9c6c229599a4a13f0d46a6c3fa9ef19fc1a124d62e338dfa0 \ + --hash=sha256:4b7387121ac7d15e550f5cb4a43aef2559ed759c35df7336c402bb8275ac9683 \ + --hash=sha256:50fc3343ac490c6b08c0cf0d704e881d0d660be923fd3076db3e932007e726e3 \ + --hash=sha256:516ea134e703e9fe26bcd1277a4b59ad30586ea90c365a87781d7887a646fe21 \ + --hash=sha256:549e234ff32571b1f4076ac269fcce7a808d3bf98b76c8dd560e42dbc66d7d91 \ + --hash=sha256:5d7f93296ee28f68447397bf5198428c9aeeab45705a55d53a6343455dcb2c3c \ + --hash=sha256:5ecfccd2329e37e9b7112a888e76d9feca2347f12f37918facbb893d7bb88ee8 \ + --hash=sha256:6276eb85ef938dc035d59b87c8a7dc559a232f954962520137529d77b18ff1df \ + --hash=sha256:6b5063083824e5509fdba180721d55909ffacccc8adbec85268b48439423d78c \ + --hash=sha256:6eae65d4c3d33da080cff9c4ab1f711b15c1d9760809dad6ea763f3812d254cb \ + --hash=sha256:6f61efb26e76c45c4a227835ddeae96d83624fb0d29eb5df5b96e14ed1a0afb7 \ + --hash=sha256:71e842ec9bc7abf543b47cf86b9a743baa95f4677d22baa4c7d5c69e49e9bc04 \ + --hash=sha256:760f83faa07f8b64e9c33fc963d790a2edb24efb479e3520c14a45741cd9b2db \ + --hash=sha256:78a97cf6a8839a48c49271cdcbd5cf37ca2c1d6b7fdd86cc864f302b5e9bf459 \ + --hash=sha256:7ce938a99998ed3c8aa7e7272dca1a610401ede816d36d0693907d863b10d9ea \ + --hash=sha256:8a6e050cb6164d3f830453754094c086ff2d0b2f3a897a1d9820f6139a1f0914 \ + 
--hash=sha256:9394673a9f4de09e28b5356e7fff97d778f8abad85c9d5ac4a4b7e25a0de7717 \ + --hash=sha256:94cd0549accc38d1494e1f8de71eca837d0509d0d44bf11d158524b0e12cebf9 \ + --hash=sha256:a04bee9ab6a4da801eb9b51f1b708a1b5b5c9eb48c03f74198464c66f0d344ac \ + --hash=sha256:a23582810fedb8c0bc47524558fb6c56aac3fc252cb306072fd2815da2a47c32 \ + --hash=sha256:a2c0cd47381a3229c403062f764160d57d4d175e022c1df84e168c6251a22eec \ + --hash=sha256:a8b17438104fed022ce745b362294d9ce35b4c2e45c1d958ad4a4b019285f4a1 \ + --hash=sha256:a9a3008438615669153eb86b26b61e09993921ebdd75385ddd748702c5adfddb \ + --hash=sha256:b02cf04496f6576afffef5ddd04a0cb7d49cf6be16a9059d793a30b035f6b6ac \ + --hash=sha256:b419ae593c86b87014b9be7396b385491ad7f320bde96826d0dd174459e54665 \ + --hash=sha256:c0a7bb1a68a5d3471880e264621346c48665b3bf1c3759d682fc0864c540bd9e \ + --hash=sha256:c70cc23f12726be8f8bc72e41d5065d77e4515efae3690326764ea1b07845cfb \ + --hash=sha256:c8daeb2d2174beb4575b77482320303f3d39b8e81153da4f0fb08eb5fe86a6c5 \ + --hash=sha256:cb3d760a6117f621261d662bccc8ef5bc32ca673e037c83fbe565324f5c46936 \ + --hash=sha256:d55f3dffadd674514ad19451161118fd010988540cee43d8bc20675e775925de \ + --hash=sha256:d89c3468de4cdc4f08a57e214384d0471911a3830fcdaf7a8cc587e42a866372 \ + --hash=sha256:db391fa7c66df6762ee3f00c95a89e6d428f4d60e7abc8328f4fe155b5ac6e54 \ + --hash=sha256:dfb781ff7eaa91a6f7fd41776ec37c5853c795d3b358d4896fdbb5df168af422 \ + --hash=sha256:e5bf0ed4490068a2e72ac03d786693adeb909981cc596425d09032d372bcc849 \ + --hash=sha256:e7aec276d68421f9574040c26e2a7c3771060bc0cff408bae1dcb19d3ab1e63c \ + --hash=sha256:ef639cb3372f69ec44915fafcd6698b6cc78fbe0c2ea41be867f6ed612811963 \ + --hash=sha256:f260d0d41e9b4da1ed1e0f1ce571f97fe370b152ab18778e9e8f67d6af432018 + # via pyjwt google-api-core==2.25.1 \ --hash=sha256:8a2a56c1fef82987a524371f99f3bd0143702fecc670c72e600c1cda6bf8dbb7 \ --hash=sha256:d2aaa0b13c78c61cb3f4282c464c046e45fbd75755683c9c525e6e8f7ed0a5e8 @@ -303,6 +447,47 @@ pybind11==2.13.6 \ --hash=sha256:237c41e29157b962835d356b370ededd57594a26d5894a795960f0047cb5caf5 \ --hash=sha256:ba6af10348c12b24e92fa086b39cfba0eff619b61ac77c406167d813b096d39a # via -r mlir/python/requirements.txt +pycparser==2.23 \ + --hash=sha256:78816d4f24add8f10a06d6f05b4d424ad9e96cfebf68a4ddc99c65c0720d00c2 \ + --hash=sha256:e5c6e8d3fbad53479cab09ac03729e0a9faf2bee3db8208a550daf5af81a5934 + # via cffi +pygithub==2.8.1 \ + --hash=sha256:23a0a5bca93baef082e03411bf0ce27204c32be8bfa7abc92fe4a3e132936df0 \ + --hash=sha256:341b7c78521cb07324ff670afd1baa2bf5c286f8d9fd302c1798ba594a5400c9 + # via -r .ci/requirements.txt +pyjwt[crypto]==2.10.1 \ + --hash=sha256:3cc5772eb20009233caf06e9d8a0577824723b44e6648ee0a2aedb6cf9381953 \ + --hash=sha256:dcdd193e30abefd5debf142f9adfcdd2b58004e644f25406ffaebd50bd98dacb + # via pygithub +pynacl==1.6.0 \ + --hash=sha256:04f20784083014e265ad58c1b2dd562c3e35864b5394a14ab54f5d150ee9e53e \ + --hash=sha256:10d755cf2a455d8c0f8c767a43d68f24d163b8fe93ccfaabfa7bafd26be58d73 \ + --hash=sha256:140373378e34a1f6977e573033d1dd1de88d2a5d90ec6958c9485b2fd9f3eb90 \ + --hash=sha256:16c60daceee88d04f8d41d0a4004a7ed8d9a5126b997efd2933e08e93a3bd850 \ + --hash=sha256:16dd347cdc8ae0b0f6187a2608c0af1c8b7ecbbe6b4a06bff8253c192f696990 \ + --hash=sha256:25720bad35dfac34a2bcdd61d9e08d6bfc6041bebc7751d9c9f2446cf1e77d64 \ + --hash=sha256:2d6cd56ce4998cb66a6c112fda7b1fdce5266c9f05044fa72972613bef376d15 \ + --hash=sha256:347dcddce0b4d83ed3f32fd00379c83c425abee5a9d2cd0a2c84871334eaff64 \ + 
--hash=sha256:4853c154dc16ea12f8f3ee4b7e763331876316cc3a9f06aeedf39bcdca8f9995 \ + --hash=sha256:49c336dd80ea54780bcff6a03ee1a476be1612423010472e60af83452aa0f442 \ + --hash=sha256:4a25cfede801f01e54179b8ff9514bd7b5944da560b7040939732d1804d25419 \ + --hash=sha256:51fed9fe1bec9e7ff9af31cd0abba179d0e984a2960c77e8e5292c7e9b7f7b5d \ + --hash=sha256:536703b8f90e911294831a7fbcd0c062b837f3ccaa923d92a6254e11178aaf42 \ + --hash=sha256:5789f016e08e5606803161ba24de01b5a345d24590a80323379fc4408832d290 \ + --hash=sha256:6b08eab48c9669d515a344fb0ef27e2cbde847721e34bba94a343baa0f33f1f4 \ + --hash=sha256:6b393bc5e5a0eb86bb85b533deb2d2c815666665f840a09e0aa3362bb6088736 \ + --hash=sha256:84709cea8f888e618c21ed9a0efdb1a59cc63141c403db8bf56c469b71ad56f2 \ + --hash=sha256:8bfaa0a28a1ab718bad6239979a5a57a8d1506d0caf2fba17e524dbb409441cf \ + --hash=sha256:bbcc4452a1eb10cd5217318c822fde4be279c9de8567f78bad24c773c21254f8 \ + --hash=sha256:cb36deafe6e2bce3b286e5d1f3e1c246e0ccdb8808ddb4550bb2792f2df298f2 \ + --hash=sha256:cf831615cc16ba324240de79d925eacae8265b7691412ac6b24221db157f6bd1 \ + --hash=sha256:dcdeb41c22ff3c66eef5e63049abf7639e0db4edee57ba70531fc1b6b133185d \ + --hash=sha256:dea103a1afcbc333bc0e992e64233d360d393d1e63d0bc88554f572365664348 \ + --hash=sha256:ef214b90556bb46a485b7da8258e59204c244b1b5b576fb71848819b468c44a7 \ + --hash=sha256:f3482abf0f9815e7246d461fab597aa179b7524628a4bc36f86a7dc418d2608d \ + --hash=sha256:f46386c24a65383a9081d68e9c2de909b1834ec74ff3013271f1bca9c2d233eb \ + --hash=sha256:f4b3824920e206b4f52abd7de621ea7a44fd3cb5c8daceb7c3612345dfc54f2e + # via pygithub pyyaml==6.0.1 \ --hash=sha256:04ac92ad1925b2cff1db0cfebffb6ffc43457495c9b3c39d3fcae417d7125dc5 \ --hash=sha256:062582fca9fabdd2c8b54a3ef1c978d786e0f6b3a1510e0ac93ef59e0ddae2bc \ @@ -362,6 +547,7 @@ requests==2.32.5 \ # via # google-api-core # google-cloud-storage + # pygithub rsa==4.9.1 \ --hash=sha256:68635866661c6836b8d39430f97a996acbd61bfa49406748ea243539fe239762 \ --hash=sha256:e7bdbfdb5497da4c07dfd35530e1a902659db6ff241e39d9953cad06ebd0ae75 @@ -386,8 +572,12 @@ swig==4.3.1 \ typing-extensions==4.15.0 \ --hash=sha256:0cea48d173cc12fa28ecabc3b837ea3cf6f38c6d1136f85cbaaf598984861466 \ --hash=sha256:f0fa19c6845758ab08074a0cfa8b7aecb71c999ca73d62883bc25cc018c4e548 - # via -r mlir/python/requirements.txt + # via + # -r mlir/python/requirements.txt + # pygithub urllib3==2.5.0 \ --hash=sha256:3fc47733c7e419d4bc3f6b3dc2b4f890bb743906a30d56ba4a5bfa4bbff92760 \ --hash=sha256:e6b01673c0fa6a13e374b50871808eb3bf7046c4b125b216f6bf1cc604cff0dc - # via requests + # via + # pygithub + # requests diff --git a/.ci/premerge_advisor_explain.py b/.ci/premerge_advisor_explain.py index 94f7949332e3a..e1bc59f389b36 100644 --- a/.ci/premerge_advisor_explain.py +++ b/.ci/premerge_advisor_explain.py @@ -4,20 +4,90 @@ """Script for getting explanations from the premerge advisor.""" import argparse -import os import platform import sys +import json + +# TODO(boomanaiden154): Remove the optional call once we can require Python +# 3.10. 
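+# (On Python >= 3.10 the Optional[int] annotation below could be written
+# as "int | None" instead.)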
+from typing import Optional
 
 import requests
+import github
+import github.PullRequest
 
 import generate_test_report_lib
 
 PREMERGE_ADVISOR_URL = (
     "http://premerge-advisor.premerge-advisor.svc.cluster.local:5000/explain"
)
+COMMENT_TAG = "<!--LLVM PREMERGE ADVISOR COMMENT: {platform}-->"
+
+
+def get_comment_id(platform: str, pr: github.PullRequest.PullRequest) -> Optional[int]:
+    platform_comment_tag = COMMENT_TAG.format(platform=platform)
+    for comment in pr.as_issue().get_comments():
+        if platform_comment_tag in comment.body:
+            return comment.id
+    return None
+
+
+def get_comment(
+    github_token: str,
+    pr_number: int,
+    body: str,
+) -> dict[str, str]:
+    repo = github.Github(github_token).get_repo("llvm/llvm-project")
+    pr = repo.get_issue(pr_number).as_pull_request()
+    comment = {"body": body}
+    comment_id = get_comment_id(platform.system(), pr)
+    if comment_id:
+        comment["id"] = comment_id
+    return comment
 
 
-def main(commit_sha: str, build_log_files: list[str]):
+def main(
+    commit_sha: str,
+    build_log_files: list[str],
+    github_token: str,
+    pr_number: int,
+    return_code: int,
+):
+    """The main entrypoint for the script.
+
+    This function parses failures from files, requests information from the
+    premerge advisor, and may write a Github comment depending upon the output.
+    There are four different scenarios:
+    1. There has never been a previous failure and the job passes - We do not
+       create a comment. We write out an empty file to the comment path so the
+       issue-write workflow knows not to create anything.
+    2. There has never been a previous failure and the job fails - We create a
+       new comment containing the failure information and any possible premerge
+       advisor findings.
+    3. There has been a previous failure and the job passes - We update the
+       existing comment by passing its ID and a passed message to the
+       issue-write workflow.
+    4. There has been a previous failure and the job fails - We update the
+       existing comment in the same manner as above, but generate the comment
+       as if we have a failure.
+
+    Args:
+      commit_sha: The base commit SHA for this PR run.
+      build_log_files: The list of JUnit XML files and ninja logs.
+      github_token: The token to use to access the Github API.
+      pr_number: The number of the PR associated with this run.
+      return_code: The numerical return code of ninja/CMake.
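+
+    As an illustration (values here are hypothetical), in scenario 4 the
+    "comment" file written below would contain JSON along the lines of:
+    [{"body": "<markdown failure report>", "id": 123456}]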
+ """ + if return_code == 0: + with open("comment", "w") as comment_file_handle: + comment = get_comment( + github_token, + pr_number, + ":white_check_mark: With the latest revision this PR passed " + "the premerge checks.", + ) + if "id" in comment: + json.dump([comment], comment_file_handle) junit_objects, ninja_logs = generate_test_report_lib.load_info_from_files( build_log_files ) @@ -45,6 +115,21 @@ def main(commit_sha: str, build_log_files: list[str]): ) if advisor_response.status_code == 200: print(advisor_response.json()) + comments = [ + get_comment( + github_token, + pr_number, + generate_test_report_lib.generate_report( + generate_test_report_lib.compute_platform_title(), + return_code, + junit_objects, + ninja_logs, + failure_explanations_list=advisor_response.json(), + ), + ) + ] + with open("comment", "w") as comment_file_handle: + json.dump(comments, comment_file_handle) else: print(advisor_response.reason) @@ -52,6 +137,9 @@ def main(commit_sha: str, build_log_files: list[str]): if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("commit_sha", help="The base commit SHA for the test.") + parser.add_argument("return_code", help="The build's return code", type=int) + parser.add_argument("github_token", help="Github authentication token", type=str) + parser.add_argument("pr_number", help="The PR number", type=int) parser.add_argument( "build_log_files", help="Paths to JUnit report files and ninja logs.", nargs="*" ) @@ -62,4 +150,10 @@ def main(commit_sha: str, build_log_files: list[str]): if platform.machine() == "arm64": sys.exit(0) - main(args.commit_sha, args.build_log_files) + main( + args.commit_sha, + args.build_log_files, + args.github_token, + args.pr_number, + args.return_code, + ) diff --git a/.ci/requirements.txt b/.ci/requirements.txt index 2fec1baf25fdc..45eb253548496 100644 --- a/.ci/requirements.txt +++ b/.ci/requirements.txt @@ -1,2 +1,3 @@ junitparser==3.2.0 google-cloud-storage==3.3.0 +PyGithub==2.8.1 diff --git a/.ci/utils.sh b/.ci/utils.sh index 540acfa8d5cc5..c364f9395d67b 100644 --- a/.ci/utils.sh +++ b/.ci/utils.sh @@ -33,17 +33,18 @@ function at-exit { # If building fails there will be no results files. 
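   # nullglob makes the globs below expand to nothing rather than to a literal
   # pattern when no matching files exist.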
shopt -s nullglob - if [[ "$GITHUB_STEP_SUMMARY" != "" ]]; then + if [[ "$GITHUB_ACTIONS" != "" ]]; then python "${MONOREPO_ROOT}"/.ci/generate_test_report_github.py \ $retcode "${BUILD_DIR}"/test-results.*.xml "${MONOREPO_ROOT}"/ninja*.log \ >> $GITHUB_STEP_SUMMARY + python "${MONOREPO_ROOT}"/.ci/premerge_advisor_explain.py \ + $(git rev-parse HEAD~1) $retcode "${GITHUB_TOKEN}" \ + $GITHUB_PR_NUMBER "${BUILD_DIR}"/test-results.*.xml \ + "${MONOREPO_ROOT}"/ninja*.log fi if [[ "$retcode" != "0" ]]; then if [[ "$GITHUB_ACTIONS" != "" ]]; then - python "${MONOREPO_ROOT}"/.ci/premerge_advisor_explain.py \ - $(git rev-parse HEAD~1) "${BUILD_DIR}"/test-results.*.xml \ - "${MONOREPO_ROOT}"/ninja*.log python "${MONOREPO_ROOT}"/.ci/premerge_advisor_upload.py \ $(git rev-parse HEAD~1) $GITHUB_RUN_NUMBER \ "${BUILD_DIR}"/test-results.*.xml "${MONOREPO_ROOT}"/ninja*.log diff --git a/.github/workflows/build-ci-container-tooling.yml b/.github/workflows/build-ci-container-tooling.yml index b94e373746447..531da2ccbd446 100644 --- a/.github/workflows/build-ci-container-tooling.yml +++ b/.github/workflows/build-ci-container-tooling.yml @@ -36,6 +36,9 @@ jobs: test-command: 'cd $HOME && clang-format --version | grep version && git-clang-format -h | grep usage && black --version | grep black' - container-name: lint test-command: 'cd $HOME && clang-tidy --version | grep version && clang-tidy-diff.py -h | grep usage' + - container-name: abi-tests + test-command: 'cd $HOME && abi-compliance-checker --help' + target: abi-tests steps: - name: Checkout LLVM uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 @@ -52,7 +55,7 @@ jobs: with: container-name: ci-ubuntu-24.04-${{ matrix.container-name }} dockerfile: .github/workflows/containers/github-action-ci-tooling/Dockerfile - target: ci-container-code-${{ matrix.container-name }} + target: ci-container-${{ matrix.target || format('code-{0}', matrix.container-name) }} test-command: ${{ matrix.test-command }} push-ci-container: diff --git a/.github/workflows/containers/github-action-ci-tooling/Dockerfile b/.github/workflows/containers/github-action-ci-tooling/Dockerfile index 707bdb309b789..be61264b93753 100644 --- a/.github/workflows/containers/github-action-ci-tooling/Dockerfile +++ b/.github/workflows/containers/github-action-ci-tooling/Dockerfile @@ -47,6 +47,28 @@ RUN echo '%sudo ALL=(ALL) NOPASSWD:ALL' >> /etc/sudoers # as root in 'ci-container-code-format' and 'ci-container-code-lint' containers +FROM base AS ci-container-build-tools +ARG LLVM_VERSION +ARG LLVM_VERSION_MAJOR + +COPY --from=llvm-downloader /llvm-extract/LLVM-${LLVM_VERSION}-Linux-X64/bin/clang-${LLVM_VERSION_MAJOR} \ + ${LLVM_SYSROOT}/bin/ +COPY --from=llvm-downloader /llvm-extract/LLVM-${LLVM_VERSION}-Linux-X64/lib/clang/${LLVM_VERSION_MAJOR}/include \ + ${LLVM_SYSROOT}/lib/clang/${LLVM_VERSION_MAJOR}/include +RUN ln -s ${LLVM_SYSROOT}/bin/clang-${LLVM_VERSION_MAJOR} ${LLVM_SYSROOT}/bin/clang && \ + ln -s ${LLVM_SYSROOT}/bin/clang ${LLVM_SYSROOT}/bin/clang++ + +RUN apt-get update && \ + DEBIAN_FRONTEND=noninteractive apt-get install -y \ + cmake \ + ninja-build && \ + apt-get clean && \ + rm -rf /var/lib/apt/lists/* + +ENV CC=${LLVM_SYSROOT}/bin/clang +ENV CXX=${LLVM_SYSROOT}/bin/clang++ + + FROM base AS ci-container-code-format ARG LLVM_VERSION @@ -63,31 +85,37 @@ USER gha WORKDIR /home/gha -FROM base AS ci-container-code-lint +FROM ci-container-build-tools AS ci-container-code-lint ARG LLVM_VERSION ARG LLVM_VERSION_MAJOR COPY --from=llvm-downloader 
/llvm-extract/LLVM-${LLVM_VERSION}-Linux-X64/bin/clang-tidy \ - /llvm-extract/LLVM-${LLVM_VERSION}-Linux-X64/bin/clang-${LLVM_VERSION_MAJOR} \ ${LLVM_SYSROOT}/bin/ -COPY --from=llvm-downloader /llvm-extract/LLVM-${LLVM_VERSION}-Linux-X64/lib/clang/${LLVM_VERSION_MAJOR}/include \ - ${LLVM_SYSROOT}/lib/clang/${LLVM_VERSION_MAJOR}/include COPY clang-tools-extra/clang-tidy/tool/clang-tidy-diff.py ${LLVM_SYSROOT}/bin/clang-tidy-diff.py -RUN ln -s ${LLVM_SYSROOT}/bin/clang-${LLVM_VERSION_MAJOR} ${LLVM_SYSROOT}/bin/clang && \ - ln -s ${LLVM_SYSROOT}/bin/clang ${LLVM_SYSROOT}/bin/clang++ +# Install dependencies for 'pr-code-lint.yml' job +COPY llvm/utils/git/requirements_linting.txt requirements_linting.txt +RUN pip install -r requirements_linting.txt --break-system-packages && \ + rm requirements_linting.txt +USER gha +WORKDIR /home/gha + +FROM ci-container-build-tools as ci-container-abi-tests RUN apt-get update && \ DEBIAN_FRONTEND=noninteractive apt-get install -y \ - cmake \ - ninja-build && \ + abi-compliance-checker \ + abi-dumper \ + autoconf \ + pkg-config && \ apt-get clean && \ rm -rf /var/lib/apt/lists/* -# Install dependencies for 'pr-code-lint.yml' job -COPY llvm/utils/git/requirements_linting.txt requirements_linting.txt -RUN pip install -r requirements_linting.txt --break-system-packages && \ - rm requirements_linting.txt -USER gha -WORKDIR /home/gha +RUN git clone https://github.com/universal-ctags/ctags.git && \ + cd ctags && \ + ./autogen.sh && \ + ./configure && \ + sudo make install && \ + rm -Rf ../ctags + diff --git a/.github/workflows/premerge.yaml b/.github/workflows/premerge.yaml index dfe7e0068e84e..02a6f3b868d85 100644 --- a/.github/workflows/premerge.yaml +++ b/.github/workflows/premerge.yaml @@ -64,6 +64,9 @@ jobs: - name: Build and Test timeout-minutes: 120 continue-on-error: ${{ runner.arch == 'ARM64' }} + env: + GITHUB_TOKEN: ${{ github.token }} + GITHUB_PR_NUMBER: ${{ github.event.pull_request.number }} run: | git config --global --add safe.directory '*' @@ -153,6 +156,9 @@ jobs: timeout-minutes: 180 if: ${{ steps.vars.outputs.windows-projects != '' }} shell: cmd + env: + GITHUB_TOKEN: ${{ github.token }} + GITHUB_PR_NUMBER: ${{ github.event.pull_request.number }} run: | call C:\\BuildTools\\Common7\\Tools\\VsDevCmd.bat -arch=amd64 -host_arch=amd64 # See the comments above in the Linux job for why we define each of diff --git a/bolt/include/bolt/Core/MCPlusBuilder.h b/bolt/include/bolt/Core/MCPlusBuilder.h index 5e349cd69fb43..69ae4fb8ddcc9 100644 --- a/bolt/include/bolt/Core/MCPlusBuilder.h +++ b/bolt/include/bolt/Core/MCPlusBuilder.h @@ -784,6 +784,11 @@ class MCPlusBuilder { virtual bool isPop(const MCInst &Inst) const { return false; } + /// Determine if a basic block looks like an epilogue. For now it is only + /// called at the final stage of building CFG to check basic block ending + /// with an indirect call that has unknown control flow attribute. + virtual bool isEpilogue(const BinaryBasicBlock &BB) const { return false; } + /// Return true if the instruction is used to terminate an indirect branch. virtual bool isTerminateBranch(const MCInst &Inst) const { llvm_unreachable("not implemented"); @@ -1371,20 +1376,13 @@ class MCPlusBuilder { /// Return true if \p Inst has RestoreState annotation. bool hasRestoreState(const MCInst &Inst) const; - /// Stores RA Signed annotation on \p Inst. - void setRASigned(MCInst &Inst) const; - - /// Return true if \p Inst has Signed RA annotation. 
-  bool isRASigned(const MCInst &Inst) const;
-
-  /// Stores RA Unsigned annotation on \p Inst.
-  void setRAUnsigned(MCInst &Inst) const;
-
-  /// Return true if \p Inst has Unsigned RA annotation.
-  bool isRAUnsigned(const MCInst &Inst) const;
+  /// Sets kRASigned or kRAUnsigned annotation on \p Inst.
+  /// Fails if \p Inst has either annotation already set.
+  void setRAState(MCInst &Inst, bool State) const;
 
-  /// Return true if \p Inst doesn't have any annotation related to RA state.
-  bool isRAStateUnknown(const MCInst &Inst) const;
+  /// Return true if \p Inst has kRASigned annotation, false if it has
+  /// kRAUnsigned annotation, and std::nullopt if neither annotation is set.
+  std::optional<bool> getRAState(const MCInst &Inst) const;
 
   /// Return true if the instruction is a call with an exception handling info.
   virtual bool isInvoke(const MCInst &Inst) const {
diff --git a/bolt/lib/Core/BinaryFunction.cpp b/bolt/lib/Core/BinaryFunction.cpp
index 2fd1ab13008b7..a0d8385aa3824 100644
--- a/bolt/lib/Core/BinaryFunction.cpp
+++ b/bolt/lib/Core/BinaryFunction.cpp
@@ -2167,13 +2167,10 @@ bool BinaryFunction::postProcessIndirectBranches(
       continue;
     }
 
-    // If this block contains an epilogue code and has an indirect branch,
-    // then most likely it's a tail call. Otherwise, we cannot tell for sure
-    // what it is and conservatively reject the function's CFG.
-    bool IsEpilogue = llvm::any_of(BB, [&](const MCInst &Instr) {
-      return BC.MIB->isLeave(Instr) || BC.MIB->isPop(Instr);
-    });
-    if (IsEpilogue) {
+    // If this block contains epilogue code and has an indirect branch,
+    // then most likely it's a tail call. Otherwise, we cannot tell for
+    // sure what it is and conservatively reject the function's CFG.
+    if (BC.MIB->isEpilogue(BB)) {
       BC.MIB->convertJmpToTailCall(Instr);
       BB.removeAllSuccessors();
       continue;
diff --git a/bolt/lib/Core/MCPlusBuilder.cpp b/bolt/lib/Core/MCPlusBuilder.cpp
index e96de80bfa701..0cb4ba1ebfbd7 100644
--- a/bolt/lib/Core/MCPlusBuilder.cpp
+++ b/bolt/lib/Core/MCPlusBuilder.cpp
@@ -186,26 +186,21 @@ bool MCPlusBuilder::hasRestoreState(const MCInst &Inst) const {
   return hasAnnotation(Inst, MCAnnotation::kRestoreState);
 }
 
-void MCPlusBuilder::setRASigned(MCInst &Inst) const {
+void MCPlusBuilder::setRAState(MCInst &Inst, bool State) const {
   assert(!hasAnnotation(Inst, MCAnnotation::kRASigned));
-  setAnnotationOpValue(Inst, MCAnnotation::kRASigned, true);
-}
-
-bool MCPlusBuilder::isRASigned(const MCInst &Inst) const {
-  return hasAnnotation(Inst, MCAnnotation::kRASigned);
-}
-
-void MCPlusBuilder::setRAUnsigned(MCInst &Inst) const {
   assert(!hasAnnotation(Inst, MCAnnotation::kRAUnsigned));
-  setAnnotationOpValue(Inst, MCAnnotation::kRAUnsigned, true);
+  if (State)
+    setAnnotationOpValue(Inst, MCAnnotation::kRASigned, true);
+  else
+    setAnnotationOpValue(Inst, MCAnnotation::kRAUnsigned, true);
 }
 
-bool MCPlusBuilder::isRAUnsigned(const MCInst &Inst) const {
-  return hasAnnotation(Inst, MCAnnotation::kRAUnsigned);
-}
-
-bool MCPlusBuilder::isRAStateUnknown(const MCInst &Inst) const {
-  return !(isRAUnsigned(Inst) || isRASigned(Inst));
+std::optional<bool> MCPlusBuilder::getRAState(const MCInst &Inst) const {
+  if (hasAnnotation(Inst, MCAnnotation::kRASigned))
+    return true;
+  if (hasAnnotation(Inst, MCAnnotation::kRAUnsigned))
+    return false;
+  return std::nullopt;
 }
 
 std::optional<MCPlus::MCLandingPad>
 MCPlusBuilder::getEHInfo(const MCInst &Inst) const {
diff --git a/bolt/lib/Passes/InsertNegateRAStatePass.cpp b/bolt/lib/Passes/InsertNegateRAStatePass.cpp
index 33664e1160a7b..775b7795e77c5 100644
--- a/bolt/lib/Passes/InsertNegateRAStatePass.cpp
+++ b/bolt/lib/Passes/InsertNegateRAStatePass.cpp
@@ -21,7 +21,12 @@ using namespace llvm;
 namespace llvm {
 namespace bolt {
 
+static bool PassFailed = false;
+
 void InsertNegateRAState::runOnFunction(BinaryFunction &BF) {
+  if (PassFailed)
+    return;
+
   BinaryContext &BC = BF.getBinaryContext();
 
   if (BF.getState() == BinaryFunction::State::Empty)
@@ -39,7 +44,7 @@ void InsertNegateRAState::runOnFunction(BinaryFunction &BF) {
   for (FunctionFragment &FF : BF.getLayout().fragments()) {
     coverFunctionFragmentStart(BF, FF);
     bool FirstIter = true;
-    MCInst PrevInst;
+    bool PrevRAState = false;
     // As this pass runs after function splitting, we should only check
     // consecutive instructions inside FunctionFragments.
     for (BinaryBasicBlock *BB : FF) {
@@ -47,18 +52,23 @@ void InsertNegateRAState::runOnFunction(BinaryFunction &BF) {
         MCInst &Inst = *It;
         if (BC.MIB->isCFI(Inst))
           continue;
+        auto RAState = BC.MIB->getRAState(Inst);
+        if (!RAState) {
+          BC.errs() << "BOLT-ERROR: unknown RAState after inferUnknownStates"
+                    << " in function " << BF.getPrintName() << "\n";
+          PassFailed = true;
+          return;
+        }
         if (!FirstIter) {
           // Consecutive instructions with different RAState means we need to
           // add an OpNegateRAState.
-          if ((BC.MIB->isRASigned(PrevInst) && BC.MIB->isRAUnsigned(Inst)) ||
-              (BC.MIB->isRAUnsigned(PrevInst) && BC.MIB->isRASigned(Inst))) {
+          if (*RAState != PrevRAState)
             It = BF.addCFIInstruction(
                 BB, It, MCCFIInstruction::createNegateRAState(nullptr));
-          }
         } else {
           FirstIter = false;
         }
-        PrevInst = *It;
+        PrevRAState = *RAState;
       }
     }
   }
@@ -81,10 +91,17 @@ void InsertNegateRAState::coverFunctionFragmentStart(BinaryFunction &BF,
   });
   // If a function is already split in the input, the first FF can also start
   // with Signed state. This covers that scenario as well.
-  if (BC.MIB->isRASigned(*((*FirstNonEmpty)->begin()))) {
-    BF.addCFIInstruction(*FirstNonEmpty, (*FirstNonEmpty)->begin(),
-                         MCCFIInstruction::createNegateRAState(nullptr));
+  auto II = (*FirstNonEmpty)->getFirstNonPseudo();
+  auto RAState = BC.MIB->getRAState(*II);
+  if (!RAState) {
+    BC.errs() << "BOLT-ERROR: unknown RAState after inferUnknownStates"
+              << " in function " << BF.getPrintName() << "\n";
+    PassFailed = true;
+    return;
   }
+  if (*RAState)
+    BF.addCFIInstruction(*FirstNonEmpty, II,
+                         MCCFIInstruction::createNegateRAState(nullptr));
 }
 
 void InsertNegateRAState::inferUnknownStates(BinaryFunction &BF) {
@@ -96,15 +113,21 @@ void InsertNegateRAState::inferUnknownStates(BinaryFunction &BF) {
     if (BC.MIB->isCFI(Inst))
       continue;
 
-    if (!FirstIter && BC.MIB->isRAStateUnknown(Inst)) {
-      if (BC.MIB->isRASigned(PrevInst) || BC.MIB->isPSignOnLR(PrevInst)) {
-        BC.MIB->setRASigned(Inst);
-      } else if (BC.MIB->isRAUnsigned(PrevInst) ||
-                 BC.MIB->isPAuthOnLR(PrevInst)) {
-        BC.MIB->setRAUnsigned(Inst);
+    auto RAState = BC.MIB->getRAState(Inst);
+    if (!FirstIter && !RAState) {
+      if (BC.MIB->isPSignOnLR(PrevInst))
+        RAState = true;
+      else if (BC.MIB->isPAuthOnLR(PrevInst))
+        RAState = false;
+      else {
+        auto PrevRAState = BC.MIB->getRAState(PrevInst);
+        RAState = PrevRAState ? *PrevRAState : false;
+      }
+      BC.MIB->setRAState(Inst, *RAState);
     } else {
       FirstIter = false;
+      if (!RAState)
+        BC.MIB->setRAState(Inst, BF.getInitialRAState());
     }
     PrevInst = Inst;
   }
@@ -135,6 +158,8 @@
            << " functions "
            << format("(%.2lf%%).\n",
                      (100.0 * FunctionsModified) /
                          BC.getBinaryFunctions().size());
+  if (PassFailed)
+    return createFatalBOLTError("");
   return Error::success();
 }
diff --git a/bolt/lib/Passes/MarkRAStates.cpp b/bolt/lib/Passes/MarkRAStates.cpp
index b262d66732b7d..51075be0e1ac2 100644
--- a/bolt/lib/Passes/MarkRAStates.cpp
+++ b/bolt/lib/Passes/MarkRAStates.cpp
@@ -72,9 +72,6 @@ bool MarkRAStates::runOnFunction(BinaryFunction &BF) {
         BF.setIgnored();
         return false;
       }
-      // The signing instruction itself is unsigned, the next will be
-      // signed.
-      BC.MIB->setRAUnsigned(Inst);
     } else if (BC.MIB->isPAuthOnLR(Inst)) {
       if (!RAState) {
         // RA authenticating instructions should only follow signed RA state.
@@ -86,15 +83,10 @@ bool MarkRAStates::runOnFunction(BinaryFunction &BF) {
         BF.setIgnored();
         return false;
       }
-      // The authenticating instruction itself is signed, but the next will be
-      // unsigned.
-      BC.MIB->setRASigned(Inst);
-    } else if (RAState) {
-      BC.MIB->setRASigned(Inst);
-    } else {
-      BC.MIB->setRAUnsigned(Inst);
     }
 
+    BC.MIB->setRAState(Inst, RAState);
+
     // Updating RAState. All updates are valid from the next instruction.
     // Because the same instruction can have remember and restore, the order
     // here is relevant. This is the reason to loop over Annotations instead
diff --git a/bolt/lib/Target/AArch64/AArch64MCPlusBuilder.cpp b/bolt/lib/Target/AArch64/AArch64MCPlusBuilder.cpp
index 3c77091d91ebd..db3989d6b0b5f 100644
--- a/bolt/lib/Target/AArch64/AArch64MCPlusBuilder.cpp
+++ b/bolt/lib/Target/AArch64/AArch64MCPlusBuilder.cpp
@@ -164,11 +164,53 @@ class AArch64MCPlusBuilder : public MCPlusBuilder {
 
   bool isPush(const MCInst &Inst) const override {
     return isStoreToStack(Inst);
-  };
+  }
 
   bool isPop(const MCInst &Inst) const override {
     return isLoadFromStack(Inst);
-  };
+  }
+
+  // We look for instructions that load from the stack or adjust the stack
+  // pointer, and assume the basic block is an epilogue if and only if such
+  // instructions are present and immediately precede the branch instruction
+  // that ends the basic block.
+  bool isEpilogue(const BinaryBasicBlock &BB) const override {
+    if (BB.succ_size())
+      return false;
+
+    bool SeenLoadFromStack = false;
+    bool SeenStackPointerAdjustment = false;
+    for (const MCInst &Instr : BB) {
+      // Skip CFI pseudo instructions.
+      if (isCFI(Instr))
+        continue;
+
+      bool IsPop = isPop(Instr);
+      // A load from stack may also adjust SP in pre-index or post-index
+      // form; for the purpose of epilogue recognition we do not need to
+      // check for that separately.
+      bool IsSPAdj = (isADD(Instr) || isMOVW(Instr)) &&
+                     Instr.getOperand(0).isReg() &&
+                     Instr.getOperand(0).getReg() == AArch64::SP;
+      SeenLoadFromStack |= IsPop;
+      SeenStackPointerAdjustment |= IsSPAdj;
+
+      if (!SeenLoadFromStack && !SeenStackPointerAdjustment)
+        continue;
+      if (IsPop || IsSPAdj || isPAuthOnLR(Instr))
+        continue;
+      if (isReturn(Instr))
+        return true;
+      if (isBranch(Instr))
+        break;
+
+      // Any previously seen load from stack or stack adjustment instruction
+      // is definitely not part of the epilogue code sequence, so reset these
+      // two.
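+      // (For example, a pop followed by an unrelated instruction and then a
+      // branch is not treated as an epilogue sequence.)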
+      SeenLoadFromStack = false;
+      SeenStackPointerAdjustment = false;
+    }
+    return SeenLoadFromStack || SeenStackPointerAdjustment;
+  }
 
   void createCall(MCInst &Inst, const MCSymbol *Target,
                   MCContext *Ctx) override {
diff --git a/bolt/lib/Target/X86/X86MCPlusBuilder.cpp b/bolt/lib/Target/X86/X86MCPlusBuilder.cpp
index 5fca5e813515f..7c24c2ce136fa 100644
--- a/bolt/lib/Target/X86/X86MCPlusBuilder.cpp
+++ b/bolt/lib/Target/X86/X86MCPlusBuilder.cpp
@@ -219,6 +219,12 @@ class X86MCPlusBuilder : public MCPlusBuilder {
     return getPopSize(Inst) == 0 ? false : true;
   }
 
+  bool isEpilogue(const BinaryBasicBlock &BB) const override {
+    return ::llvm::any_of(BB, [&](const MCInst &Instr) {
+      return isLeave(Instr) || isPop(Instr);
+    });
+  }
+
   bool isTerminateBranch(const MCInst &Inst) const override {
     return Inst.getOpcode() == X86::ENDBR32 || Inst.getOpcode() == X86::ENDBR64;
   }
diff --git a/bolt/test/AArch64/epilogue-determination.s b/bolt/test/AArch64/epilogue-determination.s
new file mode 100644
index 0000000000000..437d8149c0d6b
--- /dev/null
+++ b/bolt/test/AArch64/epilogue-determination.s
@@ -0,0 +1,48 @@
+# Test that we will not incorrectly take the first basic block in function
+# `_foo` as an epilogue due to the first load from stack instruction.
+
+# RUN: %clang %cflags %s -o %t.so -Wl,-q
+# RUN: llvm-bolt %t.so -o %t.bolt --print-cfg | FileCheck %s
+
+  .text
+  .global _foo
+  .type _foo, %function
+_foo:
+  ldr w8, [sp]
+  adr x10, _jmptbl
+  ldrsw x9, [x10, x9, lsl #2]
+  add x10, x10, x9
+  br x10
+# CHECK-NOT: x10 # TAILCALL
+# CHECK: x10 # UNKNOWN CONTROL FLOW
+  mov x0, 0
+  ret
+  mov x0, 1
+  ret
+
+  .balign 4
+_jmptbl:
+  .long -16
+  .long -8
+
+  .global _bar
+  .type _bar, %function
+_bar:
+  stp x29, x30, [sp, #-0x10]!
+  mov x29, sp
+  sub sp, sp, #0x10
+  ldr x8, [x29, #0x30]
+  blr x8
+  add sp, sp, #0x10
+  ldp x29, x30, [sp], #0x10
+  br x2
+# CHECK-NOT: x2 # UNKNOWN CONTROL FLOW
+# CHECK: x2 # TAILCALL
+
+  .global _start
+  .type _start, %function
+_start:
+  ret
+
+  # Dummy relocation to force relocation mode
+  .reloc 0, R_AARCH64_NONE
diff --git a/clang-tools-extra/clang-tidy/bugprone/BugproneTidyModule.cpp b/clang-tools-extra/clang-tidy/bugprone/BugproneTidyModule.cpp
index 3ba1532334e4a..6859dc97c112a 100644
--- a/clang-tools-extra/clang-tidy/bugprone/BugproneTidyModule.cpp
+++ b/clang-tools-extra/clang-tidy/bugprone/BugproneTidyModule.cpp
@@ -65,6 +65,7 @@
 #include "ParentVirtualCallCheck.h"
 #include "PointerArithmeticOnPolymorphicObjectCheck.h"
 #include "PosixReturnCheck.h"
+#include "RandomGeneratorSeedCheck.h"
 #include "RawMemoryCallOnNonTrivialTypeCheck.h"
 #include "RedundantBranchConditionCheck.h"
 #include "ReservedIdentifierCheck.h"
@@ -230,6 +231,8 @@ class BugproneModule : public ClangTidyModule {
     CheckFactories.registerCheck<ParentVirtualCallCheck>(
         "bugprone-parent-virtual-call");
     CheckFactories.registerCheck<PosixReturnCheck>("bugprone-posix-return");
+    CheckFactories.registerCheck<RandomGeneratorSeedCheck>(
+        "bugprone-random-generator-seed");
     CheckFactories.registerCheck<RawMemoryCallOnNonTrivialTypeCheck>(
         "bugprone-raw-memory-call-on-non-trivial-type");
diff --git a/clang-tools-extra/clang-tidy/bugprone/CMakeLists.txt b/clang-tools-extra/clang-tidy/bugprone/CMakeLists.txt
index 49c467aa5090c..db1256d91d311 100644
--- a/clang-tools-extra/clang-tidy/bugprone/CMakeLists.txt
+++ b/clang-tools-extra/clang-tidy/bugprone/CMakeLists.txt
@@ -66,6 +66,7 @@ add_clang_library(clangTidyBugproneModule STATIC
   ParentVirtualCallCheck.cpp
   PointerArithmeticOnPolymorphicObjectCheck.cpp
   PosixReturnCheck.cpp
+  RandomGeneratorSeedCheck.cpp
   RawMemoryCallOnNonTrivialTypeCheck.cpp
  RedundantBranchConditionCheck.cpp
   ReservedIdentifierCheck.cpp
diff --git a/clang-tools-extra/clang-tidy/cert/ProperlySeededRandomGeneratorCheck.cpp b/clang-tools-extra/clang-tidy/bugprone/RandomGeneratorSeedCheck.cpp
similarity index 84%
rename from clang-tools-extra/clang-tidy/cert/ProperlySeededRandomGeneratorCheck.cpp
rename to clang-tools-extra/clang-tidy/bugprone/RandomGeneratorSeedCheck.cpp
index b8bca7286ce69..3e32e9b8a704c 100644
--- a/clang-tools-extra/clang-tidy/cert/ProperlySeededRandomGeneratorCheck.cpp
+++ b/clang-tools-extra/clang-tidy/bugprone/RandomGeneratorSeedCheck.cpp
@@ -6,29 +6,28 @@
 //
 //===----------------------------------------------------------------------===//
 
-#include "ProperlySeededRandomGeneratorCheck.h"
+#include "RandomGeneratorSeedCheck.h"
 #include "clang/AST/ASTContext.h"
 #include "clang/ASTMatchers/ASTMatchFinder.h"
 #include "llvm/ADT/STLExtras.h"
 
 using namespace clang::ast_matchers;
 
-namespace clang::tidy::cert {
+namespace clang::tidy::bugprone {
 
-ProperlySeededRandomGeneratorCheck::ProperlySeededRandomGeneratorCheck(
-    StringRef Name, ClangTidyContext *Context)
+RandomGeneratorSeedCheck::RandomGeneratorSeedCheck(StringRef Name,
+                                                   ClangTidyContext *Context)
     : ClangTidyCheck(Name, Context),
       RawDisallowedSeedTypes(
           Options.get("DisallowedSeedTypes", "time_t,std::time_t")) {
   RawDisallowedSeedTypes.split(DisallowedSeedTypes, ',');
 }
 
-void ProperlySeededRandomGeneratorCheck::storeOptions(
-    ClangTidyOptions::OptionMap &Opts) {
+void RandomGeneratorSeedCheck::storeOptions(ClangTidyOptions::OptionMap &Opts) {
   Options.store(Opts, "DisallowedSeedTypes", RawDisallowedSeedTypes);
 }
 
-void ProperlySeededRandomGeneratorCheck::registerMatchers(MatchFinder *Finder) {
+void RandomGeneratorSeedCheck::registerMatchers(MatchFinder *Finder) {
   auto RandomGeneratorEngineDecl = cxxRecordDecl(hasAnyName(
       "::std::linear_congruential_engine", "::std::mersenne_twister_engine",
       "::std::subtract_with_carry_engine", "::std::discard_block_engine",
@@ -75,8 +74,7 @@ void ProperlySeededRandomGeneratorCheck::registerMatchers(MatchFinder *Finder) {
       this);
 }
 
-void ProperlySeededRandomGeneratorCheck::check(
-    const MatchFinder::MatchResult &Result) {
+void RandomGeneratorSeedCheck::check(const MatchFinder::MatchResult &Result) {
   const auto *Ctor = Result.Nodes.getNodeAs<CXXConstructExpr>("ctor");
   if (Ctor)
     checkSeed(Result, Ctor);
@@ -91,8 +89,8 @@
 }
 
 template <class T>
-void ProperlySeededRandomGeneratorCheck::checkSeed(
-    const MatchFinder::MatchResult &Result, const T *Func) {
+void RandomGeneratorSeedCheck::checkSeed(const MatchFinder::MatchResult &Result,
+                                         const T *Func) {
   if (Func->getNumArgs() == 0 || Func->getArg(0)->isDefaultArgument()) {
     diag(Func->getExprLoc(),
          "random number generator seeded with a default argument will generate "
@@ -118,4 +116,4 @@
   }
 }
 
-} // namespace clang::tidy::cert
+} // namespace clang::tidy::bugprone
diff --git a/clang-tools-extra/clang-tidy/cert/ProperlySeededRandomGeneratorCheck.h b/clang-tools-extra/clang-tidy/bugprone/RandomGeneratorSeedCheck.h
similarity index 67%
rename from clang-tools-extra/clang-tidy/cert/ProperlySeededRandomGeneratorCheck.h
rename to clang-tools-extra/clang-tidy/bugprone/RandomGeneratorSeedCheck.h
index 8cb2e624e0501..c9c54eaa14000 100644
--- a/clang-tools-extra/clang-tidy/cert/ProperlySeededRandomGeneratorCheck.h
+++ b/clang-tools-extra/clang-tidy/bugprone/RandomGeneratorSeedCheck.h
@@ -6,13 +6,13 @@
 //
 //===----------------------------------------------------------------------===//
 
-#ifndef LLVM_CLANG_TOOLS_EXTRA_CLANG_TIDY_CERT_PROPERLYSEEDEDRANDOMGENERATORCHECK_H
-#define LLVM_CLANG_TOOLS_EXTRA_CLANG_TIDY_CERT_PROPERLYSEEDEDRANDOMGENERATORCHECK_H
+#ifndef LLVM_CLANG_TOOLS_EXTRA_CLANG_TIDY_BUGPRONE_RANDOMGENERATORSEEDCHECK_H
+#define LLVM_CLANG_TOOLS_EXTRA_CLANG_TIDY_BUGPRONE_RANDOMGENERATORSEEDCHECK_H
 
 #include "../ClangTidyCheck.h"
 #include <string>
 
-namespace clang::tidy::cert {
+namespace clang::tidy::bugprone {
 
 /// Random number generator must be seeded properly.
 ///
 /// A random number generator initialized with default value or seeded with a
 /// constant expression is a security vulnerability.
 ///
 /// For the user-facing documentation see:
-/// https://clang.llvm.org/extra/clang-tidy/checks/cert/msc51-cpp.html
-class ProperlySeededRandomGeneratorCheck : public ClangTidyCheck {
+/// https://clang.llvm.org/extra/clang-tidy/checks/bugprone/random-generator-seed.html
+class RandomGeneratorSeedCheck : public ClangTidyCheck {
 public:
-  ProperlySeededRandomGeneratorCheck(StringRef Name, ClangTidyContext *Context);
+  RandomGeneratorSeedCheck(StringRef Name, ClangTidyContext *Context);
   void storeOptions(ClangTidyOptions::OptionMap &Opts) override;
   void registerMatchers(ast_matchers::MatchFinder *Finder) override;
   void check(const ast_matchers::MatchFinder::MatchResult &Result) override;
@@ -37,6 +37,6 @@ class RandomGeneratorSeedCheck : public ClangTidyCheck {
   SmallVector<StringRef, 5> DisallowedSeedTypes;
 };
 
-} // namespace clang::tidy::cert
+} // namespace clang::tidy::bugprone
 
-#endif // LLVM_CLANG_TOOLS_EXTRA_CLANG_TIDY_CERT_PROPERLYSEEDEDRANDOMGENERATORCHECK_H
+#endif // LLVM_CLANG_TOOLS_EXTRA_CLANG_TIDY_BUGPRONE_RANDOMGENERATORSEEDCHECK_H
diff --git a/clang-tools-extra/clang-tidy/cert/CERTTidyModule.cpp b/clang-tools-extra/clang-tidy/cert/CERTTidyModule.cpp
index 6dbcecee1e023..f46dd4cc6195a 100644
--- a/clang-tools-extra/clang-tidy/cert/CERTTidyModule.cpp
+++ b/clang-tools-extra/clang-tidy/cert/CERTTidyModule.cpp
@@ -16,6 +16,7 @@
 #include "../bugprone/ExceptionCopyConstructorThrowsCheck.h"
 #include "../bugprone/FloatLoopCounterCheck.h"
 #include "../bugprone/PointerArithmeticOnPolymorphicObjectCheck.h"
+#include "../bugprone/RandomGeneratorSeedCheck.h"
 #include "../bugprone/RawMemoryCallOnNonTrivialTypeCheck.h"
 #include "../bugprone/ReservedIdentifierCheck.h"
 #include "../bugprone/SignalHandlerCheck.h"
@@ -41,7 +42,6 @@
 #include "../readability/EnumInitialValueCheck.h"
 #include "../readability/UppercaseLiteralSuffixCheck.h"
 #include "LimitedRandomnessCheck.h"
-#include "ProperlySeededRandomGeneratorCheck.h"
 
 namespace {
 
@@ -272,7 +272,7 @@ class CERTModule : public ClangTidyModule {
         "cert-mem57-cpp");
     // MSC
     CheckFactories.registerCheck<LimitedRandomnessCheck>("cert-msc50-cpp");
-    CheckFactories.registerCheck<ProperlySeededRandomGeneratorCheck>(
+    CheckFactories.registerCheck<bugprone::RandomGeneratorSeedCheck>(
        "cert-msc51-cpp");
     CheckFactories.registerCheck<bugprone::SignalHandlerCheck>(
         "cert-msc54-cpp");
@@ -325,7 +325,7 @@ class CERTModule : public ClangTidyModule {
     CheckFactories.registerCheck<bugprone::UnsafeFunctionsCheck>(
         "cert-msc24-c");
     CheckFactories.registerCheck<LimitedRandomnessCheck>("cert-msc30-c");
-    CheckFactories.registerCheck<ProperlySeededRandomGeneratorCheck>(
+    CheckFactories.registerCheck<bugprone::RandomGeneratorSeedCheck>(
         "cert-msc32-c");
     CheckFactories.registerCheck<bugprone::UnsafeFunctionsCheck>(
         "cert-msc33-c");
diff --git a/clang-tools-extra/clang-tidy/cert/CMakeLists.txt b/clang-tools-extra/clang-tidy/cert/CMakeLists.txt
index 81015a02023ba..0ed903c4826a3 100644
--- a/clang-tools-extra/clang-tidy/cert/CMakeLists.txt
+++ b/clang-tools-extra/clang-tidy/cert/CMakeLists.txt
@@ -6,7 +6,6 @@ set(LLVM_LINK_COMPONENTS
 
 add_clang_library(clangTidyCERTModule STATIC
  CERTTidyModule.cpp
   LimitedRandomnessCheck.cpp
-  ProperlySeededRandomGeneratorCheck.cpp
 
   LINK_LIBS
   clangTidy
diff --git a/clang-tools-extra/docs/ReleaseNotes.rst b/clang-tools-extra/docs/ReleaseNotes.rst
index f3d5b6f43a227..2628a26acdf5e 100644
--- a/clang-tools-extra/docs/ReleaseNotes.rst
+++ b/clang-tools-extra/docs/ReleaseNotes.rst
@@ -283,6 +283,16 @@ New check aliases
   `
   keeping initial check as an alias to the new one.
 
+- Renamed :doc:`cert-msc32-c <clang-tidy/checks/cert/msc32-c>` to
+  :doc:`bugprone-random-generator-seed
+  <clang-tidy/checks/bugprone/random-generator-seed>`
+  keeping initial check as an alias to the new one.
+
+- Renamed :doc:`cert-msc51-cpp <clang-tidy/checks/cert/msc51-cpp>` to
+  :doc:`bugprone-random-generator-seed
+  <clang-tidy/checks/bugprone/random-generator-seed>`
+  keeping initial check as an alias to the new one.
+
 - Renamed :doc:`cert-oop57-cpp <clang-tidy/checks/cert/oop57-cpp>` to
   :doc:`bugprone-raw-memory-call-on-non-trivial-type
   <clang-tidy/checks/bugprone/raw-memory-call-on-non-trivial-type>`
diff --git a/clang-tools-extra/docs/clang-tidy/checks/bugprone/random-generator-seed.rst b/clang-tools-extra/docs/clang-tidy/checks/bugprone/random-generator-seed.rst
new file mode 100644
index 0000000000000..25712447f7897
--- /dev/null
+++ b/clang-tools-extra/docs/clang-tidy/checks/bugprone/random-generator-seed.rst
@@ -0,0 +1,44 @@
+.. title:: clang-tidy - bugprone-random-generator-seed
+
+bugprone-random-generator-seed
+==============================
+
+Flags all pseudo-random number engines, engine adaptor
+instantiations and ``srand()`` when initialized or seeded with a default
+argument, constant expression or any user-configurable type. Pseudo-random
+number engines seeded with a predictable value may cause vulnerabilities,
+e.g. in security protocols.
+
+Examples:
+
+.. code-block:: c++
+
+  void foo() {
+    std::mt19937 engine1; // Diagnose, always generate the same sequence
+    std::mt19937 engine2(1); // Diagnose
+    engine1.seed(); // Diagnose
+    engine2.seed(1); // Diagnose
+
+    std::time_t t;
+    engine1.seed(std::time(&t)); // Diagnose, system time might be controlled by user
+
+    int x = atoi(argv[1]);
+    std::mt19937 engine3(x); // Will not warn
+  }
+
+Options
+-------
+
+.. option:: DisallowedSeedTypes
+
+  A comma-separated list of the type names which are disallowed.
+  Default value is `time_t,std::time_t`.
+
+References
+----------
+
+This check corresponds to the CERT C++ Coding Standard rules
+`MSC51-CPP. Ensure your random number generator is properly seeded
+<https://wiki.sei.cmu.edu/confluence/display/cplusplus/MSC51-CPP.+Ensure+your+random+number+generator+is+properly+seeded>`_ and
+`MSC32-C. Properly seed pseudorandom number generators
+<https://wiki.sei.cmu.edu/confluence/display/c/MSC32-C.+Properly+seed+pseudorandom+number+generators>`_.
\ No newline at end of file
diff --git a/clang-tools-extra/docs/clang-tidy/checks/cert/msc32-c.rst b/clang-tools-extra/docs/clang-tidy/checks/cert/msc32-c.rst
index 6e453edefa76e..e0ed8074185ca 100644
--- a/clang-tools-extra/docs/clang-tidy/checks/cert/msc32-c.rst
+++ b/clang-tools-extra/docs/clang-tidy/checks/cert/msc32-c.rst
@@ -1,9 +1,14 @@
 .. title:: clang-tidy - cert-msc32-c
 .. meta::
-   :http-equiv=refresh: 5;URL=../cert/msc51-cpp.html
+   :http-equiv=refresh: 5;URL=../bugprone/random-generator-seed.html
 
 cert-msc32-c
 ============
 
 The `cert-msc32-c` check is an alias, please see
-:doc:`cert-msc51-cpp <../cert/msc51-cpp>` for more information.
+:doc:`bugprone-random-generator-seed <../bugprone/random-generator-seed>`
+for more information.
+
+This check corresponds to the CERT C Coding Standard rule
+`MSC32-C. Properly seed pseudorandom number generators
+<https://wiki.sei.cmu.edu/confluence/display/c/MSC32-C.+Properly+seed+pseudorandom+number+generators>`_.
diff --git a/clang-tools-extra/docs/clang-tidy/checks/cert/msc51-cpp.rst b/clang-tools-extra/docs/clang-tidy/checks/cert/msc51-cpp.rst
index 99e550aef0e7a..a9b8672091bc6 100644
--- a/clang-tools-extra/docs/clang-tidy/checks/cert/msc51-cpp.rst
+++ b/clang-tools-extra/docs/clang-tidy/checks/cert/msc51-cpp.rst
@@ -1,40 +1,14 @@
 .. title:: clang-tidy - cert-msc51-cpp
+.. meta::
+   :http-equiv=refresh: 5;URL=../bugprone/random-generator-seed.html
 
 cert-msc51-cpp
 ==============
 
-This check flags all pseudo-random number engines, engine adaptor
-instantiations and ``srand()`` when initialized or seeded with default argument,
-constant expression or any user-configurable type. Pseudo-random number
-engines seeded with a predictable value may cause vulnerabilities e.g. in
-security protocols.
-This is a CERT security rule, see
-`MSC51-CPP. Ensure your random number generator is properly seeded
-<https://wiki.sei.cmu.edu/confluence/display/cplusplus/MSC51-CPP.+Ensure+your+random+number+generator+is+properly+seeded>`_ and
-`MSC32-C. Properly seed pseudorandom number generators
-<https://wiki.sei.cmu.edu/confluence/display/c/MSC32-C.+Properly+seed+pseudorandom+number+generators>`_.
-
-Examples:
-
-.. code-block:: c++
-
-  void foo() {
-    std::mt19937 engine1; // Diagnose, always generate the same sequence
-    std::mt19937 engine2(1); // Diagnose
-    engine1.seed(); // Diagnose
-    engine2.seed(1); // Diagnose
-
-    std::time_t t;
-    engine1.seed(std::time(&t)); // Diagnose, system time might be controlled by user
+The `cert-msc51-cpp` check is an alias, please see
+:doc:`bugprone-random-generator-seed <../bugprone/random-generator-seed>`
+for more information.
 
-    int x = atoi(argv[1]);
-    std::mt19937 engine3(x); // Will not warn
-  }
-
-Options
--------
-
-.. option:: DisallowedSeedTypes
-
-  A comma-separated list of the type names which are disallowed.
-  Default value is `time_t,std::time_t`.
+This check corresponds to the CERT C++ Coding Standard rule
+`MSC51-CPP. Ensure your random number generator is properly seeded
+<https://wiki.sei.cmu.edu/confluence/display/cplusplus/MSC51-CPP.+Ensure+your+random+number+generator+is+properly+seeded>`_.
\ No newline at end of file
diff --git a/clang-tools-extra/docs/clang-tidy/checks/list.rst b/clang-tools-extra/docs/clang-tidy/checks/list.rst
index 3b0ff3ef33365..a4014b5f15f0b 100644
--- a/clang-tools-extra/docs/clang-tidy/checks/list.rst
+++ b/clang-tools-extra/docs/clang-tidy/checks/list.rst
@@ -133,6 +133,7 @@ Clang-Tidy Checks
    :doc:`bugprone-parent-virtual-call <bugprone/parent-virtual-call>`, "Yes"
    :doc:`bugprone-pointer-arithmetic-on-polymorphic-object <bugprone/pointer-arithmetic-on-polymorphic-object>`,
    :doc:`bugprone-posix-return <bugprone/posix-return>`, "Yes"
+   :doc:`bugprone-random-generator-seed <bugprone/random-generator-seed>`,
    :doc:`bugprone-raw-memory-call-on-non-trivial-type <bugprone/raw-memory-call-on-non-trivial-type>`,
    :doc:`bugprone-redundant-branch-condition <bugprone/redundant-branch-condition>`, "Yes"
    :doc:`bugprone-reserved-identifier <bugprone/reserved-identifier>`, "Yes"
@@ -183,7 +184,6 @@ Clang-Tidy Checks
    :doc:`cert-err60-cpp <cert/err60-cpp>`,
    :doc:`cert-flp30-c <cert/flp30-c>`,
    :doc:`cert-msc50-cpp <cert/msc50-cpp>`,
-   :doc:`cert-msc51-cpp <cert/msc51-cpp>`,
    :doc:`cert-oop58-cpp <cert/oop58-cpp>`,
    :doc:`concurrency-mt-unsafe <concurrency/mt-unsafe>`,
    :doc:`concurrency-thread-canceltype-asynchronous <concurrency/thread-canceltype-asynchronous>`,
@@ -461,8 +461,9 @@ Check aliases
    :doc:`cert-mem57-cpp <cert/mem57-cpp>`, :doc:`bugprone-default-operator-new-on-overaligned-type <bugprone/default-operator-new-on-overaligned-type>`,
    :doc:`cert-msc24-c <cert/msc24-c>`, :doc:`bugprone-unsafe-functions <bugprone/unsafe-functions>`,
    :doc:`cert-msc30-c <cert/msc30-c>`, :doc:`cert-msc50-cpp <cert/msc50-cpp>`,
-   :doc:`cert-msc32-c <cert/msc32-c>`, :doc:`cert-msc51-cpp <cert/msc51-cpp>`,
+   :doc:`cert-msc32-c <cert/msc32-c>`, :doc:`bugprone-random-generator-seed <bugprone/random-generator-seed>`,
    :doc:`cert-msc33-c <cert/msc33-c>`, :doc:`bugprone-unsafe-functions <bugprone/unsafe-functions>`,
+   :doc:`cert-msc51-cpp <cert/msc51-cpp>`, :doc:`bugprone-random-generator-seed <bugprone/random-generator-seed>`,
    :doc:`cert-msc54-cpp <cert/msc54-cpp>`, :doc:`bugprone-signal-handler <bugprone/signal-handler>`,
    :doc:`cert-oop11-cpp <cert/oop11-cpp>`, :doc:`performance-move-constructor-init <performance/move-constructor-init>`,
    :doc:`cert-oop54-cpp <cert/oop54-cpp>`, :doc:`bugprone-unhandled-self-assignment <bugprone/unhandled-self-assignment>`,
diff --git a/clang-tools-extra/test/clang-tidy/checkers/cert/msc32-c.c b/clang-tools-extra/test/clang-tidy/checkers/bugprone/random-generator-seed.c
similarity index 66%
rename from clang-tools-extra/test/clang-tidy/checkers/cert/msc32-c.c
rename to clang-tools-extra/test/clang-tidy/checkers/bugprone/random-generator-seed.c
index 0a1d79b4d916b..7f2a0685e4a2e 100644
--- a/clang-tools-extra/test/clang-tidy/checkers/cert/msc32-c.c
+++ b/clang-tools-extra/test/clang-tidy/checkers/bugprone/random-generator-seed.c
@@
-1,4 +1,5 @@ -// RUN: %check_clang_tidy %s cert-msc32-c %t -- -config="{CheckOptions: {cert-msc32-c.DisallowedSeedTypes: 'some_type,time_t'}}" -- -std=c99 +// RUN: %check_clang_tidy %s bugprone-random-generator-seed %t -- \ +// RUN: -config="{CheckOptions: {bugprone-random-generator-seed.DisallowedSeedTypes: 'some_type,time_t'}}" void srand(int seed); typedef int time_t; @@ -6,15 +7,15 @@ time_t time(time_t *t); void f(void) { srand(1); - // CHECK-MESSAGES: :[[@LINE-1]]:3: warning: random number generator seeded with a constant value will generate a predictable sequence of values [cert-msc32-c] + // CHECK-MESSAGES: :[[@LINE-1]]:3: warning: random number generator seeded with a constant value will generate a predictable sequence of values [bugprone-random-generator-seed] const int a = 1; srand(a); - // CHECK-MESSAGES: :[[@LINE-1]]:3: warning: random number generator seeded with a constant value will generate a predictable sequence of values [cert-msc32-c] + // CHECK-MESSAGES: :[[@LINE-1]]:3: warning: random number generator seeded with a constant value will generate a predictable sequence of values [bugprone-random-generator-seed] time_t t; srand(time(&t)); // Disallowed seed type - // CHECK-MESSAGES: :[[@LINE-1]]:3: warning: random number generator seeded with a disallowed source of seed value will generate a predictable sequence of values [cert-msc32-c] + // CHECK-MESSAGES: :[[@LINE-1]]:3: warning: random number generator seeded with a disallowed source of seed value will generate a predictable sequence of values [bugprone-random-generator-seed] } void g(void) { diff --git a/clang-tools-extra/test/clang-tidy/checkers/cert/msc51-cpp.cpp b/clang-tools-extra/test/clang-tidy/checkers/bugprone/random-generator-seed.cpp similarity index 77% rename from clang-tools-extra/test/clang-tidy/checkers/cert/msc51-cpp.cpp rename to clang-tools-extra/test/clang-tidy/checkers/bugprone/random-generator-seed.cpp index 637ba58688abe..c8818d6770799 100644 --- a/clang-tools-extra/test/clang-tidy/checkers/cert/msc51-cpp.cpp +++ b/clang-tools-extra/test/clang-tidy/checkers/bugprone/random-generator-seed.cpp @@ -1,5 +1,5 @@ -// RUN: %check_clang_tidy %s cert-msc51-cpp %t -- \ -// RUN: -config="{CheckOptions: {cert-msc51-cpp.DisallowedSeedTypes: 'some_type,time_t'}}" +// RUN: %check_clang_tidy %s bugprone-random-generator-seed %t -- \ +// RUN: -config="{CheckOptions: {bugprone-random-generator-seed.DisallowedSeedTypes: 'some_type,time_t'}}" namespace std { @@ -71,114 +71,114 @@ void f() { time_t t; std::srand(0); - // CHECK-MESSAGES: :[[@LINE-1]]:3: warning: random number generator seeded with a constant value will generate a predictable sequence of values [cert-msc51-cpp] + // CHECK-MESSAGES: :[[@LINE-1]]:3: warning: random number generator seeded with a constant value will generate a predictable sequence of values [bugprone-random-generator-seed] std::srand(seed); - // CHECK-MESSAGES: :[[@LINE-1]]:3: warning: random number generator seeded with a constant value will generate a predictable sequence of values [cert-msc51-cpp] + // CHECK-MESSAGES: :[[@LINE-1]]:3: warning: random number generator seeded with a constant value will generate a predictable sequence of values [bugprone-random-generator-seed] std::srand(time(&t)); - // CHECK-MESSAGES: :[[@LINE-1]]:3: warning: random number generator seeded with a disallowed source of seed value will generate a predictable sequence of values [cert-msc51-cpp] + // CHECK-MESSAGES: :[[@LINE-1]]:3: warning: random number generator seeded with a disallowed source of seed value 
will generate a predictable sequence of values [bugprone-random-generator-seed] // One instantiation for every engine std::default_random_engine engine1; - // CHECK-MESSAGES: :[[@LINE-1]]:30: warning: random number generator seeded with a default argument will generate a predictable sequence of values [cert-msc51-cpp] + // CHECK-MESSAGES: :[[@LINE-1]]:30: warning: random number generator seeded with a default argument will generate a predictable sequence of values [bugprone-random-generator-seed] std::default_random_engine engine2(1); - // CHECK-MESSAGES: :[[@LINE-1]]:30: warning: random number generator seeded with a constant value will generate a predictable sequence of values [cert-msc51-cpp] + // CHECK-MESSAGES: :[[@LINE-1]]:30: warning: random number generator seeded with a constant value will generate a predictable sequence of values [bugprone-random-generator-seed] std::default_random_engine engine3(seed); - // CHECK-MESSAGES: :[[@LINE-1]]:30: warning: random number generator seeded with a constant value will generate a predictable sequence of values [cert-msc51-cpp] + // CHECK-MESSAGES: :[[@LINE-1]]:30: warning: random number generator seeded with a constant value will generate a predictable sequence of values [bugprone-random-generator-seed] std::default_random_engine engine4(time(&t)); - // CHECK-MESSAGES: :[[@LINE-1]]:30: warning: random number generator seeded with a disallowed source of seed value will generate a predictable sequence of values [cert-msc51-cpp] + // CHECK-MESSAGES: :[[@LINE-1]]:30: warning: random number generator seeded with a disallowed source of seed value will generate a predictable sequence of values [bugprone-random-generator-seed] engine1.seed(); - // CHECK-MESSAGES: :[[@LINE-1]]:11: warning: random number generator seeded with a default argument will generate a predictable sequence of values [cert-msc51-cpp] + // CHECK-MESSAGES: :[[@LINE-1]]:11: warning: random number generator seeded with a default argument will generate a predictable sequence of values [bugprone-random-generator-seed] engine1.seed(1); - // CHECK-MESSAGES: :[[@LINE-1]]:11: warning: random number generator seeded with a constant value will generate a predictable sequence of values [cert-msc51-cpp] + // CHECK-MESSAGES: :[[@LINE-1]]:11: warning: random number generator seeded with a constant value will generate a predictable sequence of values [bugprone-random-generator-seed] engine1.seed(seed); - // CHECK-MESSAGES: :[[@LINE-1]]:11: warning: random number generator seeded with a constant value will generate a predictable sequence of values [cert-msc51-cpp] + // CHECK-MESSAGES: :[[@LINE-1]]:11: warning: random number generator seeded with a constant value will generate a predictable sequence of values [bugprone-random-generator-seed] engine1.seed(time(&t)); - // CHECK-MESSAGES: :[[@LINE-1]]:11: warning: random number generator seeded with a disallowed source of seed value will generate a predictable sequence of values [cert-msc51-cpp] + // CHECK-MESSAGES: :[[@LINE-1]]:11: warning: random number generator seeded with a disallowed source of seed value will generate a predictable sequence of values [bugprone-random-generator-seed] std::mt19937 engine5; - // CHECK-MESSAGES: :[[@LINE-1]]:16: warning: random number generator seeded with a default argument will generate a predictable sequence of values [cert-msc51-cpp] + // CHECK-MESSAGES: :[[@LINE-1]]:16: warning: random number generator seeded with a default argument will generate a predictable sequence of values 
[bugprone-random-generator-seed] std::mt19937 engine6(1); - // CHECK-MESSAGES: :[[@LINE-1]]:16: warning: random number generator seeded with a constant value will generate a predictable sequence of values [cert-msc51-cpp] + // CHECK-MESSAGES: :[[@LINE-1]]:16: warning: random number generator seeded with a constant value will generate a predictable sequence of values [bugprone-random-generator-seed] std::mt19937 engine7(seed); - // CHECK-MESSAGES: :[[@LINE-1]]:16: warning: random number generator seeded with a constant value will generate a predictable sequence of values [cert-msc51-cpp] + // CHECK-MESSAGES: :[[@LINE-1]]:16: warning: random number generator seeded with a constant value will generate a predictable sequence of values [bugprone-random-generator-seed] std::mt19937 engine8(time(&t)); - // CHECK-MESSAGES: :[[@LINE-1]]:16: warning: random number generator seeded with a disallowed source of seed value will generate a predictable sequence of values [cert-msc51-cpp] + // CHECK-MESSAGES: :[[@LINE-1]]:16: warning: random number generator seeded with a disallowed source of seed value will generate a predictable sequence of values [bugprone-random-generator-seed] engine5.seed(); - // CHECK-MESSAGES: :[[@LINE-1]]:11: warning: random number generator seeded with a default argument will generate a predictable sequence of values [cert-msc51-cpp] + // CHECK-MESSAGES: :[[@LINE-1]]:11: warning: random number generator seeded with a default argument will generate a predictable sequence of values [bugprone-random-generator-seed] engine5.seed(1); - // CHECK-MESSAGES: :[[@LINE-1]]:11: warning: random number generator seeded with a constant value will generate a predictable sequence of values [cert-msc51-cpp] + // CHECK-MESSAGES: :[[@LINE-1]]:11: warning: random number generator seeded with a constant value will generate a predictable sequence of values [bugprone-random-generator-seed] engine5.seed(seed); - // CHECK-MESSAGES: :[[@LINE-1]]:11: warning: random number generator seeded with a constant value will generate a predictable sequence of values [cert-msc51-cpp] + // CHECK-MESSAGES: :[[@LINE-1]]:11: warning: random number generator seeded with a constant value will generate a predictable sequence of values [bugprone-random-generator-seed] engine5.seed(time(&t)); - // CHECK-MESSAGES: :[[@LINE-1]]:11: warning: random number generator seeded with a disallowed source of seed value will generate a predictable sequence of values [cert-msc51-cpp] + // CHECK-MESSAGES: :[[@LINE-1]]:11: warning: random number generator seeded with a disallowed source of seed value will generate a predictable sequence of values [bugprone-random-generator-seed] std::ranlux24_base engine9; - // CHECK-MESSAGES: :[[@LINE-1]]:22: warning: random number generator seeded with a default argument will generate a predictable sequence of values [cert-msc51-cpp] + // CHECK-MESSAGES: :[[@LINE-1]]:22: warning: random number generator seeded with a default argument will generate a predictable sequence of values [bugprone-random-generator-seed] std::ranlux24_base engine10(1); - // CHECK-MESSAGES: :[[@LINE-1]]:22: warning: random number generator seeded with a constant value will generate a predictable sequence of values [cert-msc51-cpp] + // CHECK-MESSAGES: :[[@LINE-1]]:22: warning: random number generator seeded with a constant value will generate a predictable sequence of values [bugprone-random-generator-seed] std::ranlux24_base engine11(seed); - // CHECK-MESSAGES: :[[@LINE-1]]:22: warning: random number generator seeded with a 
constant value will generate a predictable sequence of values [cert-msc51-cpp] + // CHECK-MESSAGES: :[[@LINE-1]]:22: warning: random number generator seeded with a constant value will generate a predictable sequence of values [bugprone-random-generator-seed] std::ranlux24_base engine12(time(&t)); - // CHECK-MESSAGES: :[[@LINE-1]]:22: warning: random number generator seeded with a disallowed source of seed value will generate a predictable sequence of values [cert-msc51-cpp] + // CHECK-MESSAGES: :[[@LINE-1]]:22: warning: random number generator seeded with a disallowed source of seed value will generate a predictable sequence of values [bugprone-random-generator-seed] engine9.seed(); - // CHECK-MESSAGES: :[[@LINE-1]]:11: warning: random number generator seeded with a default argument will generate a predictable sequence of values [cert-msc51-cpp] + // CHECK-MESSAGES: :[[@LINE-1]]:11: warning: random number generator seeded with a default argument will generate a predictable sequence of values [bugprone-random-generator-seed] engine9.seed(1); - // CHECK-MESSAGES: :[[@LINE-1]]:11: warning: random number generator seeded with a constant value will generate a predictable sequence of values [cert-msc51-cpp] + // CHECK-MESSAGES: :[[@LINE-1]]:11: warning: random number generator seeded with a constant value will generate a predictable sequence of values [bugprone-random-generator-seed] engine9.seed(seed); - // CHECK-MESSAGES: :[[@LINE-1]]:11: warning: random number generator seeded with a constant value will generate a predictable sequence of values [cert-msc51-cpp] + // CHECK-MESSAGES: :[[@LINE-1]]:11: warning: random number generator seeded with a constant value will generate a predictable sequence of values [bugprone-random-generator-seed] engine9.seed(time(&t)); - // CHECK-MESSAGES: :[[@LINE-1]]:11: warning: random number generator seeded with a disallowed source of seed value will generate a predictable sequence of values [cert-msc51-cpp] + // CHECK-MESSAGES: :[[@LINE-1]]:11: warning: random number generator seeded with a disallowed source of seed value will generate a predictable sequence of values [bugprone-random-generator-seed] std::ranlux24 engine13; - // CHECK-MESSAGES: :[[@LINE-1]]:17: warning: random number generator seeded with a default argument will generate a predictable sequence of values [cert-msc51-cpp] + // CHECK-MESSAGES: :[[@LINE-1]]:17: warning: random number generator seeded with a default argument will generate a predictable sequence of values [bugprone-random-generator-seed] std::ranlux24 engine14(1); - // CHECK-MESSAGES: :[[@LINE-1]]:17: warning: random number generator seeded with a constant value will generate a predictable sequence of values [cert-msc51-cpp] + // CHECK-MESSAGES: :[[@LINE-1]]:17: warning: random number generator seeded with a constant value will generate a predictable sequence of values [bugprone-random-generator-seed] std::ranlux24 engine15(seed); - // CHECK-MESSAGES: :[[@LINE-1]]:17: warning: random number generator seeded with a constant value will generate a predictable sequence of values [cert-msc51-cpp] + // CHECK-MESSAGES: :[[@LINE-1]]:17: warning: random number generator seeded with a constant value will generate a predictable sequence of values [bugprone-random-generator-seed] std::ranlux24 engine16(time(&t)); - // CHECK-MESSAGES: :[[@LINE-1]]:17: warning: random number generator seeded with a disallowed source of seed value will generate a predictable sequence of values [cert-msc51-cpp] + // CHECK-MESSAGES: :[[@LINE-1]]:17: warning: random 
number generator seeded with a disallowed source of seed value will generate a predictable sequence of values [bugprone-random-generator-seed] engine13.seed(); - // CHECK-MESSAGES: :[[@LINE-1]]:12: warning: random number generator seeded with a default argument will generate a predictable sequence of values [cert-msc51-cpp] + // CHECK-MESSAGES: :[[@LINE-1]]:12: warning: random number generator seeded with a default argument will generate a predictable sequence of values [bugprone-random-generator-seed] engine13.seed(1); - // CHECK-MESSAGES: :[[@LINE-1]]:12: warning: random number generator seeded with a constant value will generate a predictable sequence of values [cert-msc51-cpp] + // CHECK-MESSAGES: :[[@LINE-1]]:12: warning: random number generator seeded with a constant value will generate a predictable sequence of values [bugprone-random-generator-seed] engine13.seed(seed); - // CHECK-MESSAGES: :[[@LINE-1]]:12: warning: random number generator seeded with a constant value will generate a predictable sequence of values [cert-msc51-cpp] + // CHECK-MESSAGES: :[[@LINE-1]]:12: warning: random number generator seeded with a constant value will generate a predictable sequence of values [bugprone-random-generator-seed] engine13.seed(time(&t)); - // CHECK-MESSAGES: :[[@LINE-1]]:12: warning: random number generator seeded with a disallowed source of seed value will generate a predictable sequence of values [cert-msc51-cpp] + // CHECK-MESSAGES: :[[@LINE-1]]:12: warning: random number generator seeded with a disallowed source of seed value will generate a predictable sequence of values [bugprone-random-generator-seed] std::independent_bits engine17; - // CHECK-MESSAGES: :[[@LINE-1]]:25: warning: random number generator seeded with a default argument will generate a predictable sequence of values [cert-msc51-cpp] + // CHECK-MESSAGES: :[[@LINE-1]]:25: warning: random number generator seeded with a default argument will generate a predictable sequence of values [bugprone-random-generator-seed] std::independent_bits engine18(1); - // CHECK-MESSAGES: :[[@LINE-1]]:25: warning: random number generator seeded with a constant value will generate a predictable sequence of values [cert-msc51-cpp] + // CHECK-MESSAGES: :[[@LINE-1]]:25: warning: random number generator seeded with a constant value will generate a predictable sequence of values [bugprone-random-generator-seed] std::independent_bits engine19(seed); - // CHECK-MESSAGES: :[[@LINE-1]]:25: warning: random number generator seeded with a constant value will generate a predictable sequence of values [cert-msc51-cpp] + // CHECK-MESSAGES: :[[@LINE-1]]:25: warning: random number generator seeded with a constant value will generate a predictable sequence of values [bugprone-random-generator-seed] std::independent_bits engine20(time(&t)); - // CHECK-MESSAGES: :[[@LINE-1]]:25: warning: random number generator seeded with a disallowed source of seed value will generate a predictable sequence of values [cert-msc51-cpp] + // CHECK-MESSAGES: :[[@LINE-1]]:25: warning: random number generator seeded with a disallowed source of seed value will generate a predictable sequence of values [bugprone-random-generator-seed] engine17.seed(); - // CHECK-MESSAGES: :[[@LINE-1]]:12: warning: random number generator seeded with a default argument will generate a predictable sequence of values [cert-msc51-cpp] + // CHECK-MESSAGES: :[[@LINE-1]]:12: warning: random number generator seeded with a default argument will generate a predictable sequence of values 
[bugprone-random-generator-seed] engine17.seed(1); - // CHECK-MESSAGES: :[[@LINE-1]]:12: warning: random number generator seeded with a constant value will generate a predictable sequence of values [cert-msc51-cpp] + // CHECK-MESSAGES: :[[@LINE-1]]:12: warning: random number generator seeded with a constant value will generate a predictable sequence of values [bugprone-random-generator-seed] engine17.seed(seed); - // CHECK-MESSAGES: :[[@LINE-1]]:12: warning: random number generator seeded with a constant value will generate a predictable sequence of values [cert-msc51-cpp] + // CHECK-MESSAGES: :[[@LINE-1]]:12: warning: random number generator seeded with a constant value will generate a predictable sequence of values [bugprone-random-generator-seed] engine17.seed(time(&t)); - // CHECK-MESSAGES: :[[@LINE-1]]:12: warning: random number generator seeded with a disallowed source of seed value will generate a predictable sequence of values [cert-msc51-cpp] + // CHECK-MESSAGES: :[[@LINE-1]]:12: warning: random number generator seeded with a disallowed source of seed value will generate a predictable sequence of values [bugprone-random-generator-seed] std::shuffle_order engine21; - // CHECK-MESSAGES: :[[@LINE-1]]:22: warning: random number generator seeded with a default argument will generate a predictable sequence of values [cert-msc51-cpp] + // CHECK-MESSAGES: :[[@LINE-1]]:22: warning: random number generator seeded with a default argument will generate a predictable sequence of values [bugprone-random-generator-seed] std::shuffle_order engine22(1); - // CHECK-MESSAGES: :[[@LINE-1]]:22: warning: random number generator seeded with a constant value will generate a predictable sequence of values [cert-msc51-cpp] + // CHECK-MESSAGES: :[[@LINE-1]]:22: warning: random number generator seeded with a constant value will generate a predictable sequence of values [bugprone-random-generator-seed] std::shuffle_order engine23(seed); - // CHECK-MESSAGES: :[[@LINE-1]]:22: warning: random number generator seeded with a constant value will generate a predictable sequence of values [cert-msc51-cpp] + // CHECK-MESSAGES: :[[@LINE-1]]:22: warning: random number generator seeded with a constant value will generate a predictable sequence of values [bugprone-random-generator-seed] std::shuffle_order engine24(time(&t)); - // CHECK-MESSAGES: :[[@LINE-1]]:22: warning: random number generator seeded with a disallowed source of seed value will generate a predictable sequence of values [cert-msc51-cpp] + // CHECK-MESSAGES: :[[@LINE-1]]:22: warning: random number generator seeded with a disallowed source of seed value will generate a predictable sequence of values [bugprone-random-generator-seed] engine21.seed(); - // CHECK-MESSAGES: :[[@LINE-1]]:12: warning: random number generator seeded with a default argument will generate a predictable sequence of values [cert-msc51-cpp] + // CHECK-MESSAGES: :[[@LINE-1]]:12: warning: random number generator seeded with a default argument will generate a predictable sequence of values [bugprone-random-generator-seed] engine21.seed(1); - // CHECK-MESSAGES: :[[@LINE-1]]:12: warning: random number generator seeded with a constant value will generate a predictable sequence of values [cert-msc51-cpp] + // CHECK-MESSAGES: :[[@LINE-1]]:12: warning: random number generator seeded with a constant value will generate a predictable sequence of values [bugprone-random-generator-seed] engine21.seed(seed); - // CHECK-MESSAGES: :[[@LINE-1]]:12: warning: random number generator seeded with a constant 
value will generate a predictable sequence of values [cert-msc51-cpp] + // CHECK-MESSAGES: :[[@LINE-1]]:12: warning: random number generator seeded with a constant value will generate a predictable sequence of values [bugprone-random-generator-seed] engine21.seed(time(&t)); - // CHECK-MESSAGES: :[[@LINE-1]]:12: warning: random number generator seeded with a disallowed source of seed value will generate a predictable sequence of values [cert-msc51-cpp] + // CHECK-MESSAGES: :[[@LINE-1]]:12: warning: random number generator seeded with a disallowed source of seed value will generate a predictable sequence of values [bugprone-random-generator-seed] } struct A { diff --git a/clang/docs/ReleaseNotes.rst b/clang/docs/ReleaseNotes.rst index 08bbb9d5c76d3..5f356daec2d04 100644 --- a/clang/docs/ReleaseNotes.rst +++ b/clang/docs/ReleaseNotes.rst @@ -861,6 +861,8 @@ NetBSD Support WebAssembly Support ^^^^^^^^^^^^^^^^^^^ +- Fixed a bug where ``__has_attribute(musttail)`` was true even when WebAssembly's tail-call feature is not enabled. (#GH163256) + AVR Support ^^^^^^^^^^^ diff --git a/clang/include/clang/Basic/Attr.td b/clang/include/clang/Basic/Attr.td index 1013bfc575747..aac8c1f550cb2 100644 --- a/clang/include/clang/Basic/Attr.td +++ b/clang/include/clang/Basic/Attr.td @@ -508,6 +508,10 @@ def TargetMicrosoftRecordLayout : TargetArch<["x86", "x86_64", "arm", "thumb", let CustomCode = [{ Target.hasMicrosoftRecordLayout() }]; } +def TargetMustTailAvailable : TargetSpec { + let CustomCode = [{ Target.hasMustTail() }]; +} + def TargetELF : TargetSpec { let ObjectFormats = ["ELF"]; } @@ -1069,7 +1073,7 @@ def AVRSignal : InheritableAttr, TargetSpecificAttr<TargetAVR> { } def AsmLabel : InheritableAttr { - let Spellings = [CustomKeyword<"asm">, CustomKeyword<"__asm__">]; + let Spellings = [CustomKeyword<"asm">, CustomKeyword<"__asm">, CustomKeyword<"__asm__">]; let Args = [ // Label specifies the mangled name for the decl. StringArgument<"Label">, ]; @@ -1913,7 +1917,7 @@ def NoMerge : DeclOrStmtAttr { "functions, statements and variables">; } -def MustTail : StmtAttr { +def MustTail : StmtAttr, TargetSpecificAttr<TargetMustTailAvailable> { let Spellings = [Clang<"musttail">]; let Documentation = [MustTailDocs]; let Subjects = SubjectList<[ReturnStmt], ErrorDiag, "return statements">; diff --git a/clang/include/clang/Basic/AttrDocs.td b/clang/include/clang/Basic/AttrDocs.td index 1be9a96aa44de..f1dbd8af6093a 100644 --- a/clang/include/clang/Basic/AttrDocs.td +++ b/clang/include/clang/Basic/AttrDocs.td @@ -4295,17 +4295,17 @@ used by other languages. (This prefix is also added to the standard Itanium C++ ABI prefix on "mangled" symbol names, so that e.g. on such targets the true symbol name for a C++ variable declared as ``int cppvar;`` would be ``__Z6cppvar``; note the two underscores.) This prefix is *not* added to the -symbol names specified by the ``asm`` attribute; programmers wishing to match a -C symbol name must compensate for this. +symbol names specified by the ``__asm`` attribute; programmers wishing to match +a C symbol name must compensate for this. For example, consider the following C code: .. code-block:: c - int var1 asm("altvar") = 1; // "altvar" in symbol table. + int var1 __asm("altvar") = 1; // "altvar" in symbol table. int var2 = 1; // "_var2" in symbol table. - void func1(void) asm("altfunc"); + void func1(void) __asm("altfunc"); void func1(void) {} // "altfunc" in symbol table. void func2(void) {} // "_func2" in symbol table.
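[Editor's note] The ReleaseNotes.rst and Attr.td hunks above make ``__has_attribute(musttail)`` target-gated. A minimal usage sketch follows (the MUSTTAIL macro and count_down function are illustrative, not part of this patch): client code feature-detects the attribute, and on WebAssembly the detection now succeeds only when the tail-call feature is enabled, e.g. via -mtail-call.

    #ifdef __has_attribute
    #  if __has_attribute(musttail)
    #    define MUSTTAIL __attribute__((musttail)) /* guaranteed tail call */
    #  endif
    #endif
    #ifndef MUSTTAIL
    #  define MUSTTAIL /* plain call; deep recursion may grow the stack */
    #endif

    static int count_down(int n) {
      if (n == 0)
        return 0;
      MUSTTAIL return count_down(n - 1); /* tail call where supported */
    }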
diff --git a/clang/include/clang/Basic/BuiltinsX86.td b/clang/include/clang/Basic/BuiltinsX86.td index edff241a98738..cd5f2c3012712 100644 --- a/clang/include/clang/Basic/BuiltinsX86.td +++ b/clang/include/clang/Basic/BuiltinsX86.td @@ -93,22 +93,6 @@ let Attributes = [Const, NoThrow, RequiredVectorWidth<128>] in { } - let Features = "sse2", Attributes = [NoThrow, Const, Constexpr, RequiredVectorWidth<128>] in { - def pavgb128 : X86Builtin<"_Vector<16, unsigned char>(_Vector<16, unsigned char>, _Vector<16, unsigned char>)">; - def pavgw128 : X86Builtin<"_Vector<8, unsigned short>(_Vector<8, unsigned short>, _Vector<8, unsigned short>)">; - def pmulhw128 : X86Builtin<"_Vector<8, short>(_Vector<8, short>, _Vector<8, short>)">; - def pmulhuw128 : X86Builtin<"_Vector<8, unsigned short>(_Vector<8, unsigned short>, _Vector<8, unsigned short>)">; - def packsswb128 : X86Builtin<"_Vector<16, char>(_Vector<8, short>, _Vector<8, short>)">; - def packssdw128 : X86Builtin<"_Vector<8, short>(_Vector<4, int>, _Vector<4, int>)">; - def packuswb128 : X86Builtin<"_Vector<16, char>(_Vector<8, short>, _Vector<8, short>)">; - - def vec_ext_v2di : X86Builtin<"long long int(_Vector<2, long long int>, _Constant int)">; - def vec_ext_v4si : X86Builtin<"int(_Vector<4, int>, _Constant int)">; - def vec_ext_v4sf : X86Builtin<"float(_Vector<4, float>, _Constant int)">; - def vec_ext_v8hi : X86Builtin<"short(_Vector<8, short>, _Constant int)">; - def vec_set_v8hi : X86Builtin<"_Vector<8, short>(_Vector<8, short>, short, _Constant int)">; - } - let Features = "sse3" in { foreach Op = ["addsub"] in { def Op#ps : X86Builtin<"_Vector<4, float>(_Vector<4, float>, _Vector<4, float>)">; @@ -219,15 +203,6 @@ let Features = "sse2", Attributes = [NoThrow] in { def movnti : X86Builtin<"void(int *, int)">; } -let Features = "sse2", Attributes = [NoThrow, Const, Constexpr, RequiredVectorWidth<128>] in { - def pshuflw : X86Builtin<"_Vector<8, short>(_Vector<8, short>, _Constant int)">; - def pshufd : X86Builtin<"_Vector<4, int>(_Vector<4, int>, _Constant int)">; - def pshufhw : X86Builtin<"_Vector<8, short>(_Vector<8, short>, _Constant int)">; - def movmskpd : X86Builtin<"int(_Vector<2, double>)">; - def pmovmskb128 : X86Builtin<"int(_Vector<16, char>)">; - def shufpd : X86Builtin<"_Vector<2, double>(_Vector<2, double>, _Vector<2, double>, _Constant int)">; -} - let Features = "sse2", Attributes = [NoThrow, Const, RequiredVectorWidth<128>] in { def psadbw128 : X86Builtin<"_Vector<2, long long int>(_Vector<16, char>, _Vector<16, char>)">; def sqrtpd : X86Builtin<"_Vector<2, double>(_Vector<2, double>)">; @@ -285,12 +260,27 @@ let Features = "sse2", Attributes = [NoThrow, Const, RequiredVectorWidth<128>] i def psllq128 : X86Builtin<"_Vector<2, long long int>(_Vector<2, long long int>, _Vector<2, long long int>)">; } -let Features = "sse2", - Attributes = [NoThrow, Const, Constexpr, RequiredVectorWidth<128>] in { +let Features = "sse2", Attributes = [NoThrow, Const, Constexpr, RequiredVectorWidth<128>] in { + def movmskpd : X86Builtin<"int(_Vector<2, double>)">; + def pmovmskb128 : X86Builtin<"int(_Vector<16, char>)">; + + def pavgb128 : X86Builtin<"_Vector<16, unsigned char>(_Vector<16, unsigned char>, _Vector<16, unsigned char>)">; + def pavgw128 : X86Builtin<"_Vector<8, unsigned short>(_Vector<8, unsigned short>, _Vector<8, unsigned short>)">; + def pmaddwd128 : X86Builtin<"_Vector<4, int>(_Vector<8, short>, _Vector<8, short>)">; - + def pmulhw128 : X86Builtin<"_Vector<8, short>(_Vector<8, short>, _Vector<8, short>)">; + def 
pmulhuw128 : X86Builtin<"_Vector<8, unsigned short>(_Vector<8, unsigned short>, _Vector<8, unsigned short>)">; def pmuludq128 : X86Builtin<"_Vector<2, long long int>(_Vector<4, int>, _Vector<4, int>)">; + def packsswb128 : X86Builtin<"_Vector<16, char>(_Vector<8, short>, _Vector<8, short>)">; + def packssdw128 : X86Builtin<"_Vector<8, short>(_Vector<4, int>, _Vector<4, int>)">; + def packuswb128 : X86Builtin<"_Vector<16, char>(_Vector<8, short>, _Vector<8, short>)">; + + def pshuflw : X86Builtin<"_Vector<8, short>(_Vector<8, short>, _Constant int)">; + def pshufd : X86Builtin<"_Vector<4, int>(_Vector<4, int>, _Constant int)">; + def pshufhw : X86Builtin<"_Vector<8, short>(_Vector<8, short>, _Constant int)">; + def shufpd : X86Builtin<"_Vector<2, double>(_Vector<2, double>, _Vector<2, double>, _Constant int)">; + def psllwi128 : X86Builtin<"_Vector<8, short>(_Vector<8, short>, int)">; def pslldi128 : X86Builtin<"_Vector<4, int>(_Vector<4, int>, int)">; def psllqi128 : X86Builtin<"_Vector<2, long long int>(_Vector<2, long long int>, int)">; @@ -304,6 +294,12 @@ let Features = "sse2", def pslldqi128_byteshift : X86Builtin<"_Vector<16, char>(_Vector<16, char>, _Constant int)">; def psrldqi128_byteshift : X86Builtin<"_Vector<16, char>(_Vector<16, char>, _Constant int)">; + + def vec_ext_v2di : X86Builtin<"long long int(_Vector<2, long long int>, _Constant int)">; + def vec_ext_v4si : X86Builtin<"int(_Vector<4, int>, _Constant int)">; + def vec_ext_v4sf : X86Builtin<"float(_Vector<4, float>, _Constant int)">; + def vec_ext_v8hi : X86Builtin<"short(_Vector<8, short>, _Constant int)">; + def vec_set_v8hi : X86Builtin<"_Vector<8, short>(_Vector<8, short>, short, _Constant int)">; } let Features = "sse3", Attributes = [NoThrow] in { diff --git a/clang/include/clang/Basic/TargetInfo.h b/clang/include/clang/Basic/TargetInfo.h index ea73ed915bf03..39af84c8d0872 100644 --- a/clang/include/clang/Basic/TargetInfo.h +++ b/clang/include/clang/Basic/TargetInfo.h @@ -229,6 +229,7 @@ class TargetInfo : public TransferrableTargetInfo, protected: // Target values set by the ctor of the actual target implementation. Default // values are specified by the TargetInfo constructor. + bool HasMustTail; bool BigEndian; bool TLSSupported; bool VLASupported; @@ -669,6 +670,8 @@ class TargetInfo : public TransferrableTargetInfo, : getLongFractScale() + 1; } + virtual bool hasMustTail() const { return HasMustTail; } + /// Determine whether the __int128 type is supported on this target. 
virtual bool hasInt128Type() const { return (getPointerWidth(LangAS::Default) >= 64) || diff --git a/clang/include/clang/CIR/MissingFeatures.h b/clang/include/clang/CIR/MissingFeatures.h index af1ffffcf54c0..b64e07ff2bfb8 100644 --- a/clang/include/clang/CIR/MissingFeatures.h +++ b/clang/include/clang/CIR/MissingFeatures.h @@ -189,6 +189,7 @@ struct MissingFeatures { // Misc static bool abiArgInfo() { return false; } + static bool addAutoInitAnnotation() { return false; } static bool addHeapAllocSiteMetadata() { return false; } static bool aggEmitFinalDestCopyRValue() { return false; } static bool aggValueSlot() { return false; } @@ -198,6 +199,7 @@ struct MissingFeatures { static bool aggValueSlotMayOverlap() { return false; } static bool aggValueSlotVolatile() { return false; } static bool alignCXXRecordDecl() { return false; } + static bool appleKext() { return false; } static bool armComputeVolatileBitfields() { return false; } static bool asmGoto() { return false; } static bool asmInputOperands() { return false; } @@ -244,6 +246,7 @@ struct MissingFeatures { static bool deleteArray() { return false; } static bool devirtualizeDestructor() { return false; } static bool devirtualizeMemberFunction() { return false; } + static bool dtorCleanups() { return false; } static bool ehCleanupFlags() { return false; } static bool ehCleanupHasPrebranchedFallthrough() { return false; } static bool ehCleanupScope() { return false; } @@ -289,6 +292,7 @@ struct MissingFeatures { static bool objCGC() { return false; } static bool objCLifetime() { return false; } static bool hlsl() { return false; } + static bool msvcBuiltins() { return false; } static bool openCL() { return false; } static bool openMP() { return false; } static bool opTBAA() { return false; } @@ -303,6 +307,10 @@ struct MissingFeatures { static bool setNonGC() { return false; } static bool setObjCGCLValueClass() { return false; } static bool setTargetAttributes() { return false; } + static bool shouldCreateMemCpyFromGlobal() { return false; } + static bool shouldSplitConstantStore() { return false; } + static bool shouldUseBZeroPlusStoresToInitialize() { return false; } + static bool shouldUseMemSetToInitialize() { return false; } static bool simplifyCleanupEntry() { return false; } static bool sourceLanguageCases() { return false; } static bool stackBase() { return false; } @@ -314,16 +322,14 @@ struct MissingFeatures { static bool thunks() { return false; } static bool tryEmitAsConstant() { return false; } static bool typeChecks() { return false; } - static bool weakRefReference() { return false; } - static bool writebacks() { return false; } - static bool appleKext() { return false; } - static bool dtorCleanups() { return false; } + static bool vaArgABILowering() { return false; } + static bool vectorConstants() { return false; } + static bool vlas() { return false; } static bool vtableInitialization() { return false; } static bool vtableEmitMetadata() { return false; } static bool vtableRelativeLayout() { return false; } - static bool msvcBuiltins() { return false; } - static bool vaArgABILowering() { return false; } - static bool vlas() { return false; } + static bool weakRefReference() { return false; } + static bool writebacks() { return false; } // Missing types static bool dataMemberType() { return false; } diff --git a/clang/lib/AST/ASTImporter.cpp b/clang/lib/AST/ASTImporter.cpp index bf51c3e42719c..735f3157b694e 100644 --- a/clang/lib/AST/ASTImporter.cpp +++ b/clang/lib/AST/ASTImporter.cpp @@ -696,6 +696,10 @@ namespace 
clang { ExpectedStmt VisitCXXFoldExpr(CXXFoldExpr *E); ExpectedStmt VisitRequiresExpr(RequiresExpr* E); ExpectedStmt VisitConceptSpecializationExpr(ConceptSpecializationExpr* E); + ExpectedStmt + VisitSubstNonTypeTemplateParmPackExpr(SubstNonTypeTemplateParmPackExpr *E); + ExpectedStmt VisitPseudoObjectExpr(PseudoObjectExpr *E); + ExpectedStmt VisitCXXParenListInitExpr(CXXParenListInitExpr *E); // Helper for chaining together multiple imports. If an error is detected, // subsequent imports will return default constructed nodes, so that failure @@ -9273,6 +9277,50 @@ ASTNodeImporter::VisitConceptSpecializationExpr(ConceptSpecializationExpr *E) { const_cast<ConceptSpecializationDecl *>(CSD), &Satisfaction); } +ExpectedStmt ASTNodeImporter::VisitSubstNonTypeTemplateParmPackExpr( + SubstNonTypeTemplateParmPackExpr *E) { + Error Err = Error::success(); + auto ToType = importChecked(Err, E->getType()); + auto ToPackLoc = importChecked(Err, E->getParameterPackLocation()); + auto ToArgPack = importChecked(Err, E->getArgumentPack()); + auto ToAssociatedDecl = importChecked(Err, E->getAssociatedDecl()); + if (Err) + return std::move(Err); + + return new (Importer.getToContext()) SubstNonTypeTemplateParmPackExpr( + ToType, E->getValueKind(), ToPackLoc, ToArgPack, ToAssociatedDecl, + E->getIndex(), E->getFinal()); +} + +ExpectedStmt ASTNodeImporter::VisitPseudoObjectExpr(PseudoObjectExpr *E) { + SmallVector<Expr *, 4> ToSemantics(E->getNumSemanticExprs()); + if (Error Err = ImportContainerChecked(E->semantics(), ToSemantics)) + return std::move(Err); + auto ToSyntOrErr = import(E->getSyntacticForm()); + if (!ToSyntOrErr) + return ToSyntOrErr.takeError(); + return PseudoObjectExpr::Create(Importer.getToContext(), *ToSyntOrErr, + ToSemantics, E->getResultExprIndex()); +} + +ExpectedStmt +ASTNodeImporter::VisitCXXParenListInitExpr(CXXParenListInitExpr *E) { + Error Err = Error::success(); + auto ToType = importChecked(Err, E->getType()); + auto ToInitLoc = importChecked(Err, E->getInitLoc()); + auto ToBeginLoc = importChecked(Err, E->getBeginLoc()); + auto ToEndLoc = importChecked(Err, E->getEndLoc()); + if (Err) + return std::move(Err); + + SmallVector<Expr *, 4> ToArgs(E->getInitExprs().size()); + if (Error Err = ImportContainerChecked(E->getInitExprs(), ToArgs)) + return std::move(Err); + return CXXParenListInitExpr::Create(Importer.getToContext(), ToArgs, ToType, + E->getUserSpecifiedInitExprs().size(), + ToInitLoc, ToBeginLoc, ToEndLoc); +} + Error ASTNodeImporter::ImportOverriddenMethods(CXXMethodDecl *ToMethod, CXXMethodDecl *FromMethod) { Error ImportErrors = Error::success(); diff --git a/clang/lib/Basic/TargetInfo.cpp b/clang/lib/Basic/TargetInfo.cpp index f4d7c1288cc04..9a5db6e164f66 100644 --- a/clang/lib/Basic/TargetInfo.cpp +++ b/clang/lib/Basic/TargetInfo.cpp @@ -59,6 +59,7 @@ static const LangASMap FakeAddrSpaceMap = { TargetInfo::TargetInfo(const llvm::Triple &T) : Triple(T) { // Set defaults. Defaults are set for a 32-bit RISC platform, like PPC or // SPARC. These should be overridden by concrete targets as needed.
+ HasMustTail = true; BigEndian = !T.isLittleEndian(); TLSSupported = true; VLASupported = true; diff --git a/clang/lib/Basic/Targets/WebAssembly.cpp b/clang/lib/Basic/Targets/WebAssembly.cpp index 55ffe1df0ba08..5bbb7af4c2ca1 100644 --- a/clang/lib/Basic/Targets/WebAssembly.cpp +++ b/clang/lib/Basic/Targets/WebAssembly.cpp @@ -213,6 +213,7 @@ bool WebAssemblyTargetInfo::initFeatureMap( bool WebAssemblyTargetInfo::handleTargetFeatures( std::vector<std::string> &Features, DiagnosticsEngine &Diags) { + HasMustTail = false; for (const auto &Feature : Features) { if (Feature == "+atomics") { HasAtomics = true; @@ -345,10 +346,12 @@ bool WebAssemblyTargetInfo::handleTargetFeatures( } if (Feature == "+tail-call") { HasTailCall = true; + HasMustTail = true; continue; } if (Feature == "-tail-call") { HasTailCall = false; + HasMustTail = false; continue; } if (Feature == "+wide-arithmetic") { diff --git a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp index aeea0efeb77c3..325875d10d6ea 100644 --- a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp @@ -50,6 +50,41 @@ CIRGenFunction::emitAutoVarAlloca(const VarDecl &d, Address address = Address::invalid(); if (ty->isConstantSizeType()) { + // If this value is an array, struct, or vector with a statically + // determinable constant initializer, there are optimizations we can do. + // + // TODO: We should constant-evaluate the initializer of any variable, + // as long as it is initialized by a constant expression. Currently, + // isConstantInitializer produces wrong answers for structs with + // reference or bitfield members, and a few other cases, and checking + // for POD-ness protects us from some of these. + if (d.getInit() && + (ty->isArrayType() || ty->isRecordType() || ty->isVectorType()) && + (d.isConstexpr() || + ((ty.isPODType(getContext()) || + getContext().getBaseElementType(ty)->isObjCObjectPointerType()) && + d.getInit()->isConstantInitializer(getContext(), false)))) { + + // If the variable's a const type, and it's neither an NRVO + // candidate nor a __block variable and has no mutable members, + // emit it as a global instead. + // The exception is a variable located in a non-constant address + // space in OpenCL. + // TODO(cir): perhaps we don't need this at all at CIR since this can + // be done as part of lowering down to LLVM. + bool needsDtor = + d.needsDestruction(getContext()) == QualType::DK_cxx_destructor; + if ((!getContext().getLangOpts().OpenCL || + ty.getAddressSpace() == LangAS::opencl_constant) && + (cgm.getCodeGenOpts().MergeAllConstants && !nrvo && + !d.isEscapingByref() && + ty.isConstantStorage(getContext(), true, !needsDtor))) { + cgm.errorNYI(d.getSourceRange(), "emitAutoVarAlloca: type constant"); + } + // Otherwise, tell the initialization code that we're in this case. + emission.isConstantAggregate = true; + } + // A normal fixed sized variable becomes an alloca in the entry block, // unless: // - it's an NRVO variable.
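[Editor's note] For context on the CIRGenDecl.cpp hunk above: a minimal sketch of the kind of local the new emitAutoVarAlloca path classifies as a constant aggregate (function and variable names are illustrative):

    /* A POD array with a constant initializer: the emission is marked
       isConstantAggregate, and emitAutoVarInit can then lower the whole
       initializer via emitStoresForConstant (added in the next hunk). */
    void f(void) {
      int table[4] = {1, 2, 3, 4};
      (void)table;
    }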
@@ -131,6 +166,47 @@ bool CIRGenFunction::isTrivialInitializer(const Expr *init) { return false; } +static void emitStoresForConstant(CIRGenModule &cgm, const VarDecl &d, + Address addr, bool isVolatile, + CIRGenBuilderTy &builder, + mlir::TypedAttr constant) { + mlir::Type ty = constant.getType(); + cir::CIRDataLayout layout{cgm.getModule()}; + uint64_t constantSize = layout.getTypeAllocSize(ty); + if (!constantSize) + return; + assert(!cir::MissingFeatures::addAutoInitAnnotation()); + assert(!cir::MissingFeatures::vectorConstants()); + assert(!cir::MissingFeatures::shouldUseBZeroPlusStoresToInitialize()); + assert(!cir::MissingFeatures::shouldUseMemSetToInitialize()); + assert(!cir::MissingFeatures::shouldSplitConstantStore()); + assert(!cir::MissingFeatures::shouldCreateMemCpyFromGlobal()); + // In CIR we want to emit a store for the whole thing; later lowering + // prepare to LLVM should unwrap this into the best policy (see asserts + // above). + // + // FIXME(cir): This is closer to memcpy behavior but less optimal: instead of + // copying from a global, we just create a cir.const out of it. + + if (addr.getElementType() != ty) + addr = addr.withElementType(builder, ty); + + // If the address is an alloca, set the init attribute. + // The address is usually an alloca, but there is at least one case where + // emitAutoVarInit is called from the OpenACC codegen with an address that + // is not an alloca. + auto allocaOp = addr.getDefiningOp<cir::AllocaOp>(); + if (allocaOp) + allocaOp.setInitAttr(mlir::UnitAttr::get(&cgm.getMLIRContext())); + + // There are cases where OpenACC codegen calls emitAutoVarInit with a + // temporary decl that doesn't have a source range set. + mlir::Location loc = builder.getUnknownLoc(); + if (d.getSourceRange().isValid()) + loc = cgm.getLoc(d.getSourceRange()); + builder.createStore(loc, builder.getConstant(loc, constant), addr); +} + void CIRGenFunction::emitAutoVarInit( const CIRGenFunction::AutoVarEmission &emission) { assert(emission.variable && "emission was not valid!"); @@ -237,6 +313,9 @@ void CIRGenFunction::emitAutoVarInit( return emitStoreThroughLValue( RValue::get(builder.getConstant(initLoc, typedConstant)), lv); } + + emitStoresForConstant(cgm, d, addr, type.isVolatileQualified(), builder, + typedConstant); } void CIRGenFunction::emitAutoVarCleanups( diff --git a/clang/lib/CodeGen/CGCall.cpp b/clang/lib/CodeGen/CGCall.cpp index d2b8bf5497d26..4cb47cb5aea5a 100644 --- a/clang/lib/CodeGen/CGCall.cpp +++ b/clang/lib/CodeGen/CGCall.cpp @@ -38,6 +38,7 @@ #include "llvm/IR/Attributes.h" #include "llvm/IR/CallingConv.h" #include "llvm/IR/DataLayout.h" +#include "llvm/IR/DebugInfoMetadata.h" #include "llvm/IR/InlineAsm.h" #include "llvm/IR/IntrinsicInst.h" #include "llvm/IR/Intrinsics.h" @@ -6289,6 +6290,24 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo, pushDestroy(QualType::DK_nontrivial_c_struct, Ret.getAggregateAddress(), RetTy); + // Generate function declaration DISubprogram in order to be used + // in debug info about call sites. + if (CGDebugInfo *DI = getDebugInfo()) { + // Ensure call site info would actually be emitted before collecting + // further callee info.
+ if (CalleeDecl && !CalleeDecl->hasAttr<NoDebugAttr>() && + DI->getCallSiteRelatedAttrs() != llvm::DINode::FlagZero) { + CodeGenFunction CalleeCGF(CGM); + const GlobalDecl &CalleeGlobalDecl = + Callee.getAbstractInfo().getCalleeDecl(); + CalleeCGF.CurGD = CalleeGlobalDecl; + FunctionArgList Args; + QualType ResTy = CalleeCGF.BuildFunctionArgList(CalleeGlobalDecl, Args); + DI->EmitFuncDeclForCallSite( + CI, DI->getFunctionType(CalleeDecl, ResTy, Args), CalleeGlobalDecl); + } + } + return Ret; } diff --git a/clang/lib/CodeGen/CGDebugInfo.cpp b/clang/lib/CodeGen/CGDebugInfo.cpp index c4c34d9c0f5b5..d20497ba45ec1 100644 --- a/clang/lib/CodeGen/CGDebugInfo.cpp +++ b/clang/lib/CodeGen/CGDebugInfo.cpp @@ -5019,7 +5019,7 @@ void CGDebugInfo::EmitFunctionDecl(GlobalDecl GD, SourceLocation Loc, void CGDebugInfo::EmitFuncDeclForCallSite(llvm::CallBase *CallOrInvoke, QualType CalleeType, - const FunctionDecl *CalleeDecl) { + GlobalDecl CalleeGlobalDecl) { if (!CallOrInvoke) return; auto *Func = dyn_cast<llvm::Function>(CallOrInvoke->getCalledOperand()); @@ -5028,6 +5028,9 @@ void CGDebugInfo::EmitFuncDeclForCallSite(llvm::CallBase *CallOrInvoke, if (Func->getSubprogram()) return; + const FunctionDecl *CalleeDecl = + cast<FunctionDecl>(CalleeGlobalDecl.getDecl()); + // Do not emit a declaration subprogram for a function with nodebug // attribute, or if call site info isn't required. if (CalleeDecl->hasAttr<NoDebugAttr>() || @@ -5038,7 +5041,8 @@ // create the one describing the function in order to have complete // call site debug info. if (!CalleeDecl->isStatic() && !CalleeDecl->isInlined()) - EmitFunctionDecl(CalleeDecl, CalleeDecl->getLocation(), CalleeType, Func); + EmitFunctionDecl(CalleeGlobalDecl, CalleeDecl->getLocation(), CalleeType, + Func); } void CGDebugInfo::EmitInlineFunctionStart(CGBuilderTy &Builder, GlobalDecl GD) { diff --git a/clang/lib/CodeGen/CGDebugInfo.h b/clang/lib/CodeGen/CGDebugInfo.h index 6ea825f9693c0..a4d26d168ea79 100644 --- a/clang/lib/CodeGen/CGDebugInfo.h +++ b/clang/lib/CodeGen/CGDebugInfo.h @@ -511,7 +511,7 @@ class CGDebugInfo { /// This is needed for call site debug info. void EmitFuncDeclForCallSite(llvm::CallBase *CallOrInvoke, QualType CalleeType, - const FunctionDecl *CalleeDecl); + GlobalDecl CalleeGlobalDecl); /// Constructs the debug code for exiting a function. void EmitFunctionEnd(CGBuilderTy &Builder, llvm::Function *Fn); @@ -686,6 +686,10 @@ class CGDebugInfo { /// Emit symbol for debugger that holds the pointer to the vtable. void emitVTableSymbol(llvm::GlobalVariable *VTable, const CXXRecordDecl *RD); + /// Return flags which enable debug info emission for call sites, provided + /// that it is supported and enabled. + llvm::DINode::DIFlags getCallSiteRelatedAttrs() const; + private: /// Amend \p I's DebugLoc with \p Group (its source atom group) and \p /// Rank (lower nonzero rank is higher precedence). Does nothing if \p I @@ -864,10 +868,6 @@ class CGDebugInfo { StringRef LinkageName, llvm::dwarf::MemorySpace MS, llvm::GlobalVariable *Var, llvm::DIScope *DContext); - /// Return flags which enable debug info emission for call sites, provided - /// that it is supported and enabled. - llvm::DINode::DIFlags getCallSiteRelatedAttrs() const; - /// Get the printing policy for producing names for debug info.
PrintingPolicy getPrintingPolicy() const; diff --git a/clang/lib/CodeGen/CGExpr.cpp b/clang/lib/CodeGen/CGExpr.cpp index c723b9c48f7f9..13147c4f7d54d 100644 --- a/clang/lib/CodeGen/CGExpr.cpp +++ b/clang/lib/CodeGen/CGExpr.cpp @@ -6647,15 +6647,6 @@ RValue CodeGenFunction::EmitCall(QualType CalleeType, E == MustTailCall, E->getExprLoc()); if (auto *CalleeDecl = dyn_cast_or_null<FunctionDecl>(TargetDecl)) { - // Generate function declaration DISuprogram in order to be used - // in debug info about call sites. - if (CGDebugInfo *DI = getDebugInfo()) { - FunctionArgList Args; - QualType ResTy = BuildFunctionArgList(CalleeDecl, Args); - DI->EmitFuncDeclForCallSite(LocalCallOrInvoke, - DI->getFunctionType(CalleeDecl, ResTy, Args), - CalleeDecl); - } if (CalleeDecl->hasAttr<RestrictAttr>() || CalleeDecl->hasAttr<AllocSizeAttr>()) { // Function has 'malloc' (aka. 'restrict') or 'alloc_size' attribute. diff --git a/clang/lib/Driver/ToolChain.cpp b/clang/lib/Driver/ToolChain.cpp index 5041f59a75c6d..3452ce5339174 100644 --- a/clang/lib/Driver/ToolChain.cpp +++ b/clang/lib/Driver/ToolChain.cpp @@ -850,8 +850,11 @@ void ToolChain::addFortranRuntimeLibs(const ArgList &Args, options::OPT_fno_openmp, false)) { Driver::OpenMPRuntimeKind OMPRuntime = getDriver().getOpenMPRuntime(Args); ToolChain::RuntimeLibType RuntimeLib = GetRuntimeLibType(Args); - if (OMPRuntime == Driver::OMPRT_OMP && RuntimeLib == ToolChain::RLT_Libgcc) + if ((OMPRuntime == Driver::OMPRT_OMP && + RuntimeLib == ToolChain::RLT_Libgcc) && + !getTriple().isKnownWindowsMSVCEnvironment()) { CmdArgs.push_back("-latomic"); + } } } diff --git a/clang/lib/Headers/avx512bwintrin.h b/clang/lib/Headers/avx512bwintrin.h index 203b600078842..4a02c96620335 100644 --- a/clang/lib/Headers/avx512bwintrin.h +++ b/clang/lib/Headers/avx512bwintrin.h @@ -511,7 +511,7 @@ _mm512_packs_epi32(__m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_packssdw512((__v16si)__A, (__v16si)__B); } -static __inline__ __m512i __DEFAULT_FN_ATTRS512 +static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR _mm512_maskz_packs_epi32(__mmask32 __M, __m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_selectw_512((__mmask32)__M, @@ -519,9 +519,8 @@ _mm512_maskz_packs_epi32(__mmask32 __M, __m512i __A, __m512i __B) (__v32hi)_mm512_packs_epi32(__A, __B), (__v32hi)_mm512_setzero_si512()); } -static __inline__ __m512i __DEFAULT_FN_ATTRS512 -_mm512_mask_packs_epi32(__m512i __W, __mmask32 __M, __m512i __A, __m512i __B) -{ +static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR +_mm512_mask_packs_epi32(__m512i __W, __mmask32 __M, __m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_selectw_512((__mmask32)__M, (__v32hi)_mm512_packs_epi32(__A, __B), (__v32hi)__W); @@ -532,7 +531,7 @@ _mm512_packs_epi16(__m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_packsswb512((__v32hi)__A, (__v32hi) __B); } -static __inline__ __m512i __DEFAULT_FN_ATTRS512 +static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR _mm512_mask_packs_epi16(__m512i __W, __mmask64 __M, __m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_selectb_512((__mmask64)__M, @@ -540,7 +539,7 @@ _mm512_mask_packs_epi16(__m512i __W, __mmask64 __M, __m512i __A, __m512i __B) (__v64qi)__W); } -static __inline__ __m512i __DEFAULT_FN_ATTRS512 +static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR _mm512_maskz_packs_epi16(__mmask64 __M, __m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_selectb_512((__mmask64)__M, @@ -553,7 +552,7 @@ _mm512_packus_epi32(__m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_packusdw512((__v16si) __A, (__v16si) __B); } -static
__inline__ __m512i __DEFAULT_FN_ATTRS512 +static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR _mm512_maskz_packus_epi32(__mmask32 __M, __m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_selectw_512((__mmask32)__M, @@ -561,7 +560,7 @@ _mm512_maskz_packus_epi32(__mmask32 __M, __m512i __A, __m512i __B) (__v32hi)_mm512_setzero_si512()); } -static __inline__ __m512i __DEFAULT_FN_ATTRS512 +static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR _mm512_mask_packus_epi32(__m512i __W, __mmask32 __M, __m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_selectw_512((__mmask32)__M, @@ -574,7 +573,7 @@ _mm512_packus_epi16(__m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_packuswb512((__v32hi) __A, (__v32hi) __B); } -static __inline__ __m512i __DEFAULT_FN_ATTRS512 +static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR _mm512_mask_packus_epi16(__m512i __W, __mmask64 __M, __m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_selectb_512((__mmask64)__M, @@ -582,7 +581,7 @@ _mm512_mask_packus_epi16(__m512i __W, __mmask64 __M, __m512i __A, __m512i __B) (__v64qi)__W); } -static __inline__ __m512i __DEFAULT_FN_ATTRS512 +static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR _mm512_maskz_packus_epi16(__mmask64 __M, __m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_selectb_512((__mmask64)__M, @@ -595,17 +594,15 @@ _mm512_adds_epi8(__m512i __A, __m512i __B) { return (__m512i)__builtin_elementwise_add_sat((__v64qs)__A, (__v64qs)__B); } -static __inline__ __m512i __DEFAULT_FN_ATTRS512 -_mm512_mask_adds_epi8 (__m512i __W, __mmask64 __U, __m512i __A, __m512i __B) -{ +static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR +_mm512_mask_adds_epi8(__m512i __W, __mmask64 __U, __m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U, (__v64qi)_mm512_adds_epi8(__A, __B), (__v64qi)__W); } -static __inline__ __m512i __DEFAULT_FN_ATTRS512 -_mm512_maskz_adds_epi8 (__mmask64 __U, __m512i __A, __m512i __B) -{ +static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR +_mm512_maskz_adds_epi8(__mmask64 __U, __m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U, (__v64qi)_mm512_adds_epi8(__A, __B), (__v64qi)_mm512_setzero_si512()); @@ -616,7 +613,7 @@ _mm512_adds_epi16(__m512i __A, __m512i __B) { return (__m512i)__builtin_elementwise_add_sat((__v32hi)__A, (__v32hi)__B); } -static __inline__ __m512i __DEFAULT_FN_ATTRS512 +static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR _mm512_mask_adds_epi16 (__m512i __W, __mmask32 __U, __m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U, @@ -624,7 +621,7 @@ _mm512_mask_adds_epi16 (__m512i __W, __mmask32 __U, __m512i __A, __m512i __B) (__v32hi)__W); } -static __inline__ __m512i __DEFAULT_FN_ATTRS512 +static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR _mm512_maskz_adds_epi16 (__mmask32 __U, __m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U, @@ -637,7 +634,7 @@ _mm512_adds_epu8(__m512i __A, __m512i __B) { return (__m512i)__builtin_elementwise_add_sat((__v64qu) __A, (__v64qu) __B); } -static __inline__ __m512i __DEFAULT_FN_ATTRS512 +static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR _mm512_mask_adds_epu8 (__m512i __W, __mmask64 __U, __m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U, @@ -645,7 +642,7 @@ _mm512_mask_adds_epu8 (__m512i __W, __mmask64 __U, __m512i __A, __m512i __B) (__v64qi)__W); } -static __inline__ __m512i __DEFAULT_FN_ATTRS512 +static __inline__ 
__m512i __DEFAULT_FN_ATTRS512_CONSTEXPR _mm512_maskz_adds_epu8 (__mmask64 __U, __m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U, @@ -658,17 +655,15 @@ _mm512_adds_epu16(__m512i __A, __m512i __B) { return (__m512i)__builtin_elementwise_add_sat((__v32hu) __A, (__v32hu) __B); } -static __inline__ __m512i __DEFAULT_FN_ATTRS512 -_mm512_mask_adds_epu16 (__m512i __W, __mmask32 __U, __m512i __A, __m512i __B) -{ +static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR +_mm512_mask_adds_epu16(__m512i __W, __mmask32 __U, __m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U, (__v32hi)_mm512_adds_epu16(__A, __B), (__v32hi)__W); } -static __inline__ __m512i __DEFAULT_FN_ATTRS512 -_mm512_maskz_adds_epu16 (__mmask32 __U, __m512i __A, __m512i __B) -{ +static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR +_mm512_maskz_adds_epu16(__mmask32 __U, __m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U, (__v32hi)_mm512_adds_epu16(__A, __B), (__v32hi)_mm512_setzero_si512()); @@ -886,7 +881,7 @@ _mm512_subs_epi8(__m512i __A, __m512i __B) { return (__m512i)__builtin_elementwise_sub_sat((__v64qs)__A, (__v64qs)__B); } -static __inline__ __m512i __DEFAULT_FN_ATTRS512 +static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR _mm512_mask_subs_epi8 (__m512i __W, __mmask64 __U, __m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U, @@ -894,7 +889,7 @@ _mm512_mask_subs_epi8 (__m512i __W, __mmask64 __U, __m512i __A, __m512i __B) (__v64qi)__W); } -static __inline__ __m512i __DEFAULT_FN_ATTRS512 +static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR _mm512_maskz_subs_epi8 (__mmask64 __U, __m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U, @@ -907,7 +902,7 @@ _mm512_subs_epi16(__m512i __A, __m512i __B) { return (__m512i)__builtin_elementwise_sub_sat((__v32hi)__A, (__v32hi)__B); } -static __inline__ __m512i __DEFAULT_FN_ATTRS512 +static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR _mm512_mask_subs_epi16 (__m512i __W, __mmask32 __U, __m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U, @@ -915,7 +910,7 @@ _mm512_mask_subs_epi16 (__m512i __W, __mmask32 __U, __m512i __A, __m512i __B) (__v32hi)__W); } -static __inline__ __m512i __DEFAULT_FN_ATTRS512 +static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR _mm512_maskz_subs_epi16 (__mmask32 __U, __m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U, @@ -928,7 +923,7 @@ _mm512_subs_epu8(__m512i __A, __m512i __B) { return (__m512i)__builtin_elementwise_sub_sat((__v64qu) __A, (__v64qu) __B); } -static __inline__ __m512i __DEFAULT_FN_ATTRS512 +static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR _mm512_mask_subs_epu8 (__m512i __W, __mmask64 __U, __m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U, @@ -936,7 +931,7 @@ _mm512_mask_subs_epu8 (__m512i __W, __mmask64 __U, __m512i __A, __m512i __B) (__v64qi)__W); } -static __inline__ __m512i __DEFAULT_FN_ATTRS512 +static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR _mm512_maskz_subs_epu8 (__mmask64 __U, __m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U, @@ -949,7 +944,7 @@ _mm512_subs_epu16(__m512i __A, __m512i __B) { return (__m512i)__builtin_elementwise_sub_sat((__v32hu) __A, (__v32hu) __B); } -static __inline__ __m512i __DEFAULT_FN_ATTRS512 +static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR 
_mm512_mask_subs_epu16 (__m512i __W, __mmask32 __U, __m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U, @@ -957,7 +952,7 @@ _mm512_mask_subs_epu16 (__m512i __W, __mmask32 __U, __m512i __A, __m512i __B) (__v32hi)__W); } -static __inline__ __m512i __DEFAULT_FN_ATTRS512 +static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR _mm512_maskz_subs_epu16 (__mmask32 __U, __m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U, @@ -1191,14 +1186,14 @@ _mm512_unpackhi_epi8(__m512i __A, __m512i __B) { 62, 64+62, 63, 64+63); } -static __inline__ __m512i __DEFAULT_FN_ATTRS512 +static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR _mm512_mask_unpackhi_epi8(__m512i __W, __mmask64 __U, __m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U, (__v64qi)_mm512_unpackhi_epi8(__A, __B), (__v64qi)__W); } -static __inline__ __m512i __DEFAULT_FN_ATTRS512 +static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR _mm512_maskz_unpackhi_epi8(__mmask64 __U, __m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U, (__v64qi)_mm512_unpackhi_epi8(__A, __B), @@ -1218,14 +1213,14 @@ _mm512_unpackhi_epi16(__m512i __A, __m512i __B) { 30, 32+30, 31, 32+31); } -static __inline__ __m512i __DEFAULT_FN_ATTRS512 +static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR _mm512_mask_unpackhi_epi16(__m512i __W, __mmask32 __U, __m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U, (__v32hi)_mm512_unpackhi_epi16(__A, __B), (__v32hi)__W); } -static __inline__ __m512i __DEFAULT_FN_ATTRS512 +static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR _mm512_maskz_unpackhi_epi16(__mmask32 __U, __m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U, (__v32hi)_mm512_unpackhi_epi16(__A, __B), @@ -1253,14 +1248,14 @@ _mm512_unpacklo_epi8(__m512i __A, __m512i __B) { 54, 64+54, 55, 64+55); } -static __inline__ __m512i __DEFAULT_FN_ATTRS512 +static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR _mm512_mask_unpacklo_epi8(__m512i __W, __mmask64 __U, __m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U, (__v64qi)_mm512_unpacklo_epi8(__A, __B), (__v64qi)__W); } -static __inline__ __m512i __DEFAULT_FN_ATTRS512 +static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR _mm512_maskz_unpacklo_epi8(__mmask64 __U, __m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U, (__v64qi)_mm512_unpacklo_epi8(__A, __B), @@ -1280,14 +1275,14 @@ _mm512_unpacklo_epi16(__m512i __A, __m512i __B) { 26, 32+26, 27, 32+27); } -static __inline__ __m512i __DEFAULT_FN_ATTRS512 +static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR _mm512_mask_unpacklo_epi16(__m512i __W, __mmask32 __U, __m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U, (__v32hi)_mm512_unpacklo_epi16(__A, __B), (__v32hi)__W); } -static __inline__ __m512i __DEFAULT_FN_ATTRS512 +static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR _mm512_maskz_unpacklo_epi16(__mmask32 __U, __m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U, (__v32hi)_mm512_unpacklo_epi16(__A, __B), @@ -1566,7 +1561,7 @@ _mm512_maskz_srli_epi16(__mmask32 __U, __m512i __A, int __B) { ((__m512i)__builtin_ia32_psrldqi512_byteshift((__v64qi)(__m512i)(a), \ (int)(imm))) -static __inline__ __m512i __DEFAULT_FN_ATTRS512 +static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR _mm512_mask_mov_epi16 (__m512i __W, __mmask32 
__U, __m512i __A) { return (__m512i) __builtin_ia32_selectw_512 ((__mmask32) __U, @@ -1574,23 +1569,21 @@ _mm512_mask_mov_epi16 (__m512i __W, __mmask32 __U, __m512i __A) (__v32hi) __W); } -static __inline__ __m512i __DEFAULT_FN_ATTRS512 -_mm512_maskz_mov_epi16 (__mmask32 __U, __m512i __A) -{ +static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR +_mm512_maskz_mov_epi16(__mmask32 __U, __m512i __A) { return (__m512i) __builtin_ia32_selectw_512 ((__mmask32) __U, (__v32hi) __A, (__v32hi) _mm512_setzero_si512 ()); } -static __inline__ __m512i __DEFAULT_FN_ATTRS512 -_mm512_mask_mov_epi8 (__m512i __W, __mmask64 __U, __m512i __A) -{ +static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR +_mm512_mask_mov_epi8(__m512i __W, __mmask64 __U, __m512i __A) { return (__m512i) __builtin_ia32_selectb_512 ((__mmask64) __U, (__v64qi) __A, (__v64qi) __W); } -static __inline__ __m512i __DEFAULT_FN_ATTRS512 +static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR _mm512_maskz_mov_epi8 (__mmask64 __U, __m512i __A) { return (__m512i) __builtin_ia32_selectb_512 ((__mmask64) __U, @@ -1598,7 +1591,7 @@ _mm512_maskz_mov_epi8 (__mmask64 __U, __m512i __A) (__v64qi) _mm512_setzero_si512 ()); } -static __inline__ __m512i __DEFAULT_FN_ATTRS512 +static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR _mm512_mask_set1_epi8 (__m512i __O, __mmask64 __M, char __A) { return (__m512i) __builtin_ia32_selectb_512(__M, @@ -1606,9 +1599,8 @@ _mm512_mask_set1_epi8 (__m512i __O, __mmask64 __M, char __A) (__v64qi) __O); } -static __inline__ __m512i __DEFAULT_FN_ATTRS512 -_mm512_maskz_set1_epi8 (__mmask64 __M, char __A) -{ +static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR +_mm512_maskz_set1_epi8(__mmask64 __M, char __A) { return (__m512i) __builtin_ia32_selectb_512(__M, (__v64qi) _mm512_set1_epi8(__A), (__v64qi) _mm512_setzero_si512()); @@ -1801,7 +1793,7 @@ _mm512_broadcastb_epi8(__m128i __A) { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); } -static __inline__ __m512i __DEFAULT_FN_ATTRS512 +static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR _mm512_mask_broadcastb_epi8 (__m512i __O, __mmask64 __M, __m128i __A) { return (__m512i)__builtin_ia32_selectb_512(__M, @@ -1809,15 +1801,14 @@ _mm512_mask_broadcastb_epi8 (__m512i __O, __mmask64 __M, __m128i __A) (__v64qi) __O); } -static __inline__ __m512i __DEFAULT_FN_ATTRS512 -_mm512_maskz_broadcastb_epi8 (__mmask64 __M, __m128i __A) -{ +static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR +_mm512_maskz_broadcastb_epi8(__mmask64 __M, __m128i __A) { return (__m512i)__builtin_ia32_selectb_512(__M, (__v64qi) _mm512_broadcastb_epi8(__A), (__v64qi) _mm512_setzero_si512()); } -static __inline__ __m512i __DEFAULT_FN_ATTRS512 +static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR _mm512_mask_set1_epi16 (__m512i __O, __mmask32 __M, short __A) { return (__m512i) __builtin_ia32_selectw_512(__M, @@ -1825,9 +1816,8 @@ _mm512_mask_set1_epi16 (__m512i __O, __mmask32 __M, short __A) (__v32hi) __O); } -static __inline__ __m512i __DEFAULT_FN_ATTRS512 -_mm512_maskz_set1_epi16 (__mmask32 __M, short __A) -{ +static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR +_mm512_maskz_set1_epi16(__mmask32 __M, short __A) { return (__m512i) __builtin_ia32_selectw_512(__M, (__v32hi) _mm512_set1_epi16(__A), (__v32hi) _mm512_setzero_si512()); @@ -1840,7 +1830,7 @@ _mm512_broadcastw_epi16(__m128i __A) { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); } -static __inline__ __m512i __DEFAULT_FN_ATTRS512 +static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR _mm512_mask_broadcastw_epi16 
(__m512i __O, __mmask32 __M, __m128i __A) { return (__m512i)__builtin_ia32_selectw_512(__M, @@ -1848,7 +1838,7 @@ _mm512_mask_broadcastw_epi16 (__m512i __O, __mmask32 __M, __m128i __A) (__v32hi) __O); } -static __inline__ __m512i __DEFAULT_FN_ATTRS512 +static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR _mm512_maskz_broadcastw_epi16 (__mmask32 __M, __m128i __A) { return (__m512i)__builtin_ia32_selectw_512(__M, diff --git a/clang/lib/Headers/avx512vlbwintrin.h b/clang/lib/Headers/avx512vlbwintrin.h index 575c0c8962662..d23188ab02b6c 100644 --- a/clang/lib/Headers/avx512vlbwintrin.h +++ b/clang/lib/Headers/avx512vlbwintrin.h @@ -536,14 +536,14 @@ _mm256_maskz_abs_epi16(__mmask16 __U, __m256i __A) { (__v16hi)_mm256_setzero_si256()); } -static __inline__ __m128i __DEFAULT_FN_ATTRS128 +static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_maskz_packs_epi32(__mmask8 __M, __m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_selectw_128((__mmask8)__M, (__v8hi)_mm_packs_epi32(__A, __B), (__v8hi)_mm_setzero_si128()); } -static __inline__ __m128i __DEFAULT_FN_ATTRS128 +static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_mask_packs_epi32(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_selectw_128((__mmask8)__M, @@ -551,7 +551,7 @@ _mm_mask_packs_epi32(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B) (__v8hi)__W); } -static __inline__ __m256i __DEFAULT_FN_ATTRS256 +static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_maskz_packs_epi32(__mmask16 __M, __m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_selectw_256((__mmask16)__M, @@ -559,7 +559,7 @@ _mm256_maskz_packs_epi32(__mmask16 __M, __m256i __A, __m256i __B) (__v16hi)_mm256_setzero_si256()); } -static __inline__ __m256i __DEFAULT_FN_ATTRS256 +static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_mask_packs_epi32(__m256i __W, __mmask16 __M, __m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_selectw_256((__mmask16)__M, @@ -567,7 +567,7 @@ _mm256_mask_packs_epi32(__m256i __W, __mmask16 __M, __m256i __A, __m256i __B) (__v16hi)__W); } -static __inline__ __m128i __DEFAULT_FN_ATTRS128 +static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_maskz_packs_epi16(__mmask16 __M, __m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_selectb_128((__mmask16)__M, @@ -575,7 +575,7 @@ _mm_maskz_packs_epi16(__mmask16 __M, __m128i __A, __m128i __B) (__v16qi)_mm_setzero_si128()); } -static __inline__ __m128i __DEFAULT_FN_ATTRS128 +static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_mask_packs_epi16(__m128i __W, __mmask16 __M, __m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_selectb_128((__mmask16)__M, @@ -583,7 +583,7 @@ _mm_mask_packs_epi16(__m128i __W, __mmask16 __M, __m128i __A, __m128i __B) (__v16qi)__W); } -static __inline__ __m256i __DEFAULT_FN_ATTRS256 +static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_maskz_packs_epi16(__mmask32 __M, __m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_selectb_256((__mmask32)__M, @@ -591,7 +591,7 @@ _mm256_maskz_packs_epi16(__mmask32 __M, __m256i __A, __m256i __B) (__v32qi)_mm256_setzero_si256()); } -static __inline__ __m256i __DEFAULT_FN_ATTRS256 +static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_mask_packs_epi16(__m256i __W, __mmask32 __M, __m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_selectb_256((__mmask32)__M, @@ -599,7 +599,7 @@ _mm256_mask_packs_epi16(__m256i __W, __mmask32 __M, __m256i __A, __m256i __B) (__v32qi)__W); } -static 
__inline__ __m128i __DEFAULT_FN_ATTRS128 +static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_maskz_packus_epi32(__mmask8 __M, __m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_selectw_128((__mmask8)__M, @@ -607,7 +607,7 @@ _mm_maskz_packus_epi32(__mmask8 __M, __m128i __A, __m128i __B) (__v8hi)_mm_setzero_si128()); } -static __inline__ __m128i __DEFAULT_FN_ATTRS128 +static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_mask_packus_epi32(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_selectw_128((__mmask8)__M, @@ -615,7 +615,7 @@ _mm_mask_packus_epi32(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B) (__v8hi)__W); } -static __inline__ __m256i __DEFAULT_FN_ATTRS256 +static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_maskz_packus_epi32(__mmask16 __M, __m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_selectw_256((__mmask16)__M, @@ -623,7 +623,7 @@ _mm256_maskz_packus_epi32(__mmask16 __M, __m256i __A, __m256i __B) (__v16hi)_mm256_setzero_si256()); } -static __inline__ __m256i __DEFAULT_FN_ATTRS256 +static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_mask_packus_epi32(__m256i __W, __mmask16 __M, __m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_selectw_256((__mmask16)__M, @@ -631,7 +631,7 @@ _mm256_mask_packus_epi32(__m256i __W, __mmask16 __M, __m256i __A, __m256i __B) (__v16hi)__W); } -static __inline__ __m128i __DEFAULT_FN_ATTRS128 +static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_maskz_packus_epi16(__mmask16 __M, __m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_selectb_128((__mmask16)__M, @@ -639,7 +639,7 @@ _mm_maskz_packus_epi16(__mmask16 __M, __m128i __A, __m128i __B) (__v16qi)_mm_setzero_si128()); } -static __inline__ __m128i __DEFAULT_FN_ATTRS128 +static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_mask_packus_epi16(__m128i __W, __mmask16 __M, __m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_selectb_128((__mmask16)__M, @@ -647,7 +647,7 @@ _mm_mask_packus_epi16(__m128i __W, __mmask16 __M, __m128i __A, __m128i __B) (__v16qi)__W); } -static __inline__ __m256i __DEFAULT_FN_ATTRS256 +static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_maskz_packus_epi16(__mmask32 __M, __m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_selectb_256((__mmask32)__M, @@ -655,7 +655,7 @@ _mm256_maskz_packus_epi16(__mmask32 __M, __m256i __A, __m256i __B) (__v32qi)_mm256_setzero_si256()); } -static __inline__ __m256i __DEFAULT_FN_ATTRS256 +static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_mask_packus_epi16(__m256i __W, __mmask32 __M, __m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_selectb_256((__mmask32)__M, @@ -663,7 +663,7 @@ _mm256_mask_packus_epi16(__m256i __W, __mmask32 __M, __m256i __A, __m256i __B) (__v32qi)__W); } -static __inline__ __m128i __DEFAULT_FN_ATTRS128 +static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_mask_adds_epi8(__m128i __W, __mmask16 __U, __m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_selectb_128((__mmask16)__U, @@ -671,7 +671,7 @@ _mm_mask_adds_epi8(__m128i __W, __mmask16 __U, __m128i __A, __m128i __B) (__v16qi)__W); } -static __inline__ __m128i __DEFAULT_FN_ATTRS128 +static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_maskz_adds_epi8(__mmask16 __U, __m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_selectb_128((__mmask16)__U, @@ -679,7 +679,7 @@ _mm_maskz_adds_epi8(__mmask16 __U, __m128i __A, __m128i __B) (__v16qi)_mm_setzero_si128()); } -static 
__inline__ __m256i __DEFAULT_FN_ATTRS256 +static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_mask_adds_epi8(__m256i __W, __mmask32 __U, __m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U, @@ -687,7 +687,7 @@ _mm256_mask_adds_epi8(__m256i __W, __mmask32 __U, __m256i __A, __m256i __B) (__v32qi)__W); } -static __inline__ __m256i __DEFAULT_FN_ATTRS256 +static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_maskz_adds_epi8(__mmask32 __U, __m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U, @@ -695,7 +695,7 @@ _mm256_maskz_adds_epi8(__mmask32 __U, __m256i __A, __m256i __B) (__v32qi)_mm256_setzero_si256()); } -static __inline__ __m128i __DEFAULT_FN_ATTRS128 +static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_mask_adds_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, @@ -703,7 +703,7 @@ _mm_mask_adds_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) (__v8hi)__W); } -static __inline__ __m128i __DEFAULT_FN_ATTRS128 +static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_maskz_adds_epi16(__mmask8 __U, __m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, @@ -711,7 +711,7 @@ _mm_maskz_adds_epi16(__mmask8 __U, __m128i __A, __m128i __B) (__v8hi)_mm_setzero_si128()); } -static __inline__ __m256i __DEFAULT_FN_ATTRS256 +static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_mask_adds_epi16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, @@ -719,7 +719,7 @@ _mm256_mask_adds_epi16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B) (__v16hi)__W); } -static __inline__ __m256i __DEFAULT_FN_ATTRS256 +static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_maskz_adds_epi16(__mmask16 __U, __m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, @@ -727,7 +727,7 @@ _mm256_maskz_adds_epi16(__mmask16 __U, __m256i __A, __m256i __B) (__v16hi)_mm256_setzero_si256()); } -static __inline__ __m128i __DEFAULT_FN_ATTRS128 +static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_mask_adds_epu8(__m128i __W, __mmask16 __U, __m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_selectb_128((__mmask16)__U, @@ -735,7 +735,7 @@ _mm_mask_adds_epu8(__m128i __W, __mmask16 __U, __m128i __A, __m128i __B) (__v16qi)__W); } -static __inline__ __m128i __DEFAULT_FN_ATTRS128 +static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_maskz_adds_epu8(__mmask16 __U, __m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_selectb_128((__mmask16)__U, @@ -743,7 +743,7 @@ _mm_maskz_adds_epu8(__mmask16 __U, __m128i __A, __m128i __B) (__v16qi)_mm_setzero_si128()); } -static __inline__ __m256i __DEFAULT_FN_ATTRS256 +static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_mask_adds_epu8(__m256i __W, __mmask32 __U, __m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U, @@ -751,7 +751,7 @@ _mm256_mask_adds_epu8(__m256i __W, __mmask32 __U, __m256i __A, __m256i __B) (__v32qi)__W); } -static __inline__ __m256i __DEFAULT_FN_ATTRS256 +static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_maskz_adds_epu8(__mmask32 __U, __m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U, @@ -759,7 +759,7 @@ _mm256_maskz_adds_epu8(__mmask32 __U, __m256i __A, __m256i __B) (__v32qi)_mm256_setzero_si256()); } -static __inline__ __m128i 
__DEFAULT_FN_ATTRS128 +static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_mask_adds_epu16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, @@ -767,7 +767,7 @@ _mm_mask_adds_epu16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) (__v8hi)__W); } -static __inline__ __m128i __DEFAULT_FN_ATTRS128 +static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_maskz_adds_epu16(__mmask8 __U, __m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, @@ -775,7 +775,7 @@ _mm_maskz_adds_epu16(__mmask8 __U, __m128i __A, __m128i __B) (__v8hi)_mm_setzero_si128()); } -static __inline__ __m256i __DEFAULT_FN_ATTRS256 +static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_mask_adds_epu16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, @@ -783,7 +783,7 @@ _mm256_mask_adds_epu16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B) (__v16hi)__W); } -static __inline__ __m256i __DEFAULT_FN_ATTRS256 +static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_maskz_adds_epu16(__mmask16 __U, __m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, @@ -1095,7 +1095,7 @@ _mm256_maskz_shuffle_epi8(__mmask32 __U, __m256i __A, __m256i __B) { (__v32qi)_mm256_setzero_si256()); } -static __inline__ __m128i __DEFAULT_FN_ATTRS128 +static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_mask_subs_epi8(__m128i __W, __mmask16 __U, __m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_selectb_128((__mmask16)__U, @@ -1103,7 +1103,7 @@ _mm_mask_subs_epi8(__m128i __W, __mmask16 __U, __m128i __A, __m128i __B) (__v16qi)__W); } -static __inline__ __m128i __DEFAULT_FN_ATTRS128 +static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_maskz_subs_epi8(__mmask16 __U, __m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_selectb_128((__mmask16)__U, @@ -1111,7 +1111,7 @@ _mm_maskz_subs_epi8(__mmask16 __U, __m128i __A, __m128i __B) (__v16qi)_mm_setzero_si128()); } -static __inline__ __m256i __DEFAULT_FN_ATTRS256 +static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_mask_subs_epi8(__m256i __W, __mmask32 __U, __m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U, @@ -1119,7 +1119,7 @@ _mm256_mask_subs_epi8(__m256i __W, __mmask32 __U, __m256i __A, __m256i __B) (__v32qi)__W); } -static __inline__ __m256i __DEFAULT_FN_ATTRS256 +static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_maskz_subs_epi8(__mmask32 __U, __m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U, @@ -1127,7 +1127,7 @@ _mm256_maskz_subs_epi8(__mmask32 __U, __m256i __A, __m256i __B) (__v32qi)_mm256_setzero_si256()); } -static __inline__ __m128i __DEFAULT_FN_ATTRS128 +static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_mask_subs_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, @@ -1135,7 +1135,7 @@ _mm_mask_subs_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) (__v8hi)__W); } -static __inline__ __m128i __DEFAULT_FN_ATTRS128 +static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_maskz_subs_epi16(__mmask8 __U, __m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, @@ -1143,7 +1143,7 @@ _mm_maskz_subs_epi16(__mmask8 __U, __m128i __A, __m128i __B) (__v8hi)_mm_setzero_si128()); } -static __inline__ __m256i __DEFAULT_FN_ATTRS256 +static 
__inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_mask_subs_epi16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, @@ -1151,7 +1151,7 @@ _mm256_mask_subs_epi16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B) (__v16hi)__W); } -static __inline__ __m256i __DEFAULT_FN_ATTRS256 +static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_maskz_subs_epi16(__mmask16 __U, __m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, @@ -1159,7 +1159,7 @@ _mm256_maskz_subs_epi16(__mmask16 __U, __m256i __A, __m256i __B) (__v16hi)_mm256_setzero_si256()); } -static __inline__ __m128i __DEFAULT_FN_ATTRS128 +static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_mask_subs_epu8(__m128i __W, __mmask16 __U, __m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_selectb_128((__mmask16)__U, @@ -1167,7 +1167,7 @@ _mm_mask_subs_epu8(__m128i __W, __mmask16 __U, __m128i __A, __m128i __B) (__v16qi)__W); } -static __inline__ __m128i __DEFAULT_FN_ATTRS128 +static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_maskz_subs_epu8(__mmask16 __U, __m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_selectb_128((__mmask16)__U, @@ -1175,7 +1175,7 @@ _mm_maskz_subs_epu8(__mmask16 __U, __m128i __A, __m128i __B) (__v16qi)_mm_setzero_si128()); } -static __inline__ __m256i __DEFAULT_FN_ATTRS256 +static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_mask_subs_epu8(__m256i __W, __mmask32 __U, __m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U, @@ -1183,7 +1183,7 @@ _mm256_mask_subs_epu8(__m256i __W, __mmask32 __U, __m256i __A, __m256i __B) (__v32qi)__W); } -static __inline__ __m256i __DEFAULT_FN_ATTRS256 +static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_maskz_subs_epu8(__mmask32 __U, __m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U, @@ -1191,7 +1191,7 @@ _mm256_maskz_subs_epu8(__mmask32 __U, __m256i __A, __m256i __B) (__v32qi)_mm256_setzero_si256()); } -static __inline__ __m128i __DEFAULT_FN_ATTRS128 +static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_mask_subs_epu16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, @@ -1199,7 +1199,7 @@ _mm_mask_subs_epu16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) (__v8hi)__W); } -static __inline__ __m128i __DEFAULT_FN_ATTRS128 +static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_maskz_subs_epu16(__mmask8 __U, __m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, @@ -1207,7 +1207,7 @@ _mm_maskz_subs_epu16(__mmask8 __U, __m128i __A, __m128i __B) (__v8hi)_mm_setzero_si128()); } -static __inline__ __m256i __DEFAULT_FN_ATTRS256 +static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_mask_subs_epu16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, @@ -1215,7 +1215,7 @@ _mm256_mask_subs_epu16(__m256i __W, __mmask16 __U, __m256i __A, (__v16hi)__W); } -static __inline__ __m256i __DEFAULT_FN_ATTRS256 +static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_maskz_subs_epu16(__mmask16 __U, __m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, @@ -1432,14 +1432,14 @@ _mm_cvtepi16_epi8(__m128i __A) { 12, 13, 14, 15); } -static __inline__ __m128i __DEFAULT_FN_ATTRS128 +static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR 
_mm_mask_cvtepi16_epi8 (__m128i __O, __mmask8 __M, __m128i __A) { return (__m128i) __builtin_ia32_pmovwb128_mask ((__v8hi) __A, (__v16qi) __O, __M); } -static __inline__ __m128i __DEFAULT_FN_ATTRS128 +static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_maskz_cvtepi16_epi8 (__mmask8 __M, __m128i __A) { return (__m128i) __builtin_ia32_pmovwb128_mask ((__v8hi) __A, (__v16qi) _mm_setzero_si128(), @@ -1588,112 +1588,112 @@ _mm_mask_unpackhi_epi8(__m128i __W, __mmask16 __U, __m128i __A, __m128i __B) { (__mmask16)__U, (__v16qi)_mm_unpackhi_epi8(__A, __B), (__v16qi)__W); } -static __inline__ __m128i __DEFAULT_FN_ATTRS128 +static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_maskz_unpackhi_epi8(__mmask16 __U, __m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_selectb_128((__mmask16)__U, (__v16qi)_mm_unpackhi_epi8(__A, __B), (__v16qi)_mm_setzero_si128()); } -static __inline__ __m256i __DEFAULT_FN_ATTRS256 +static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_mask_unpackhi_epi8(__m256i __W, __mmask32 __U, __m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U, (__v32qi)_mm256_unpackhi_epi8(__A, __B), (__v32qi)__W); } -static __inline__ __m256i __DEFAULT_FN_ATTRS256 +static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_maskz_unpackhi_epi8(__mmask32 __U, __m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U, (__v32qi)_mm256_unpackhi_epi8(__A, __B), (__v32qi)_mm256_setzero_si256()); } -static __inline__ __m128i __DEFAULT_FN_ATTRS128 +static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_mask_unpackhi_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, (__v8hi)_mm_unpackhi_epi16(__A, __B), (__v8hi)__W); } -static __inline__ __m128i __DEFAULT_FN_ATTRS128 +static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_maskz_unpackhi_epi16(__mmask8 __U, __m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, (__v8hi)_mm_unpackhi_epi16(__A, __B), (__v8hi) _mm_setzero_si128()); } -static __inline__ __m256i __DEFAULT_FN_ATTRS256 +static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_mask_unpackhi_epi16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, (__v16hi)_mm256_unpackhi_epi16(__A, __B), (__v16hi)__W); } -static __inline__ __m256i __DEFAULT_FN_ATTRS256 +static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_maskz_unpackhi_epi16(__mmask16 __U, __m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, (__v16hi)_mm256_unpackhi_epi16(__A, __B), (__v16hi)_mm256_setzero_si256()); } -static __inline__ __m128i __DEFAULT_FN_ATTRS128 +static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_mask_unpacklo_epi8(__m128i __W, __mmask16 __U, __m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_selectb_128((__mmask16)__U, (__v16qi)_mm_unpacklo_epi8(__A, __B), (__v16qi)__W); } -static __inline__ __m128i __DEFAULT_FN_ATTRS128 +static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_maskz_unpacklo_epi8(__mmask16 __U, __m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_selectb_128((__mmask16)__U, (__v16qi)_mm_unpacklo_epi8(__A, __B), (__v16qi)_mm_setzero_si128()); } -static __inline__ __m256i __DEFAULT_FN_ATTRS256 +static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_mask_unpacklo_epi8(__m256i __W, __mmask32 __U, __m256i __A, __m256i __B) { return 
(__m256i)__builtin_ia32_selectb_256((__mmask32)__U, (__v32qi)_mm256_unpacklo_epi8(__A, __B), (__v32qi)__W); } -static __inline__ __m256i __DEFAULT_FN_ATTRS256 +static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_maskz_unpacklo_epi8(__mmask32 __U, __m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U, (__v32qi)_mm256_unpacklo_epi8(__A, __B), (__v32qi)_mm256_setzero_si256()); } -static __inline__ __m128i __DEFAULT_FN_ATTRS128 +static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_mask_unpacklo_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, (__v8hi)_mm_unpacklo_epi16(__A, __B), (__v8hi)__W); } -static __inline__ __m128i __DEFAULT_FN_ATTRS128 +static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_maskz_unpacklo_epi16(__mmask8 __U, __m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, (__v8hi)_mm_unpacklo_epi16(__A, __B), (__v8hi) _mm_setzero_si128()); } -static __inline__ __m256i __DEFAULT_FN_ATTRS256 +static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_mask_unpacklo_epi16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, (__v16hi)_mm256_unpacklo_epi16(__A, __B), (__v16hi)__W); } -static __inline__ __m256i __DEFAULT_FN_ATTRS256 +static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_maskz_unpacklo_epi16(__mmask16 __U, __m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, (__v16hi)_mm256_unpacklo_epi16(__A, __B), (__v16hi)_mm256_setzero_si256()); } -static __inline__ __m128i __DEFAULT_FN_ATTRS128 +static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_mask_cvtepi8_epi16(__m128i __W, __mmask8 __U, __m128i __A) { return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, @@ -1701,7 +1701,7 @@ _mm_mask_cvtepi8_epi16(__m128i __W, __mmask8 __U, __m128i __A) (__v8hi)__W); } -static __inline__ __m128i __DEFAULT_FN_ATTRS128 +static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_maskz_cvtepi8_epi16(__mmask8 __U, __m128i __A) { return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, @@ -1709,7 +1709,7 @@ _mm_maskz_cvtepi8_epi16(__mmask8 __U, __m128i __A) (__v8hi)_mm_setzero_si128()); } -static __inline__ __m256i __DEFAULT_FN_ATTRS256 +static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_mask_cvtepi8_epi16(__m256i __W, __mmask16 __U, __m128i __A) { return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, @@ -1717,7 +1717,7 @@ _mm256_mask_cvtepi8_epi16(__m256i __W, __mmask16 __U, __m128i __A) (__v16hi)__W); } -static __inline__ __m256i __DEFAULT_FN_ATTRS256 +static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_maskz_cvtepi8_epi16(__mmask16 __U, __m128i __A) { return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, @@ -1726,7 +1726,7 @@ _mm256_maskz_cvtepi8_epi16(__mmask16 __U, __m128i __A) } -static __inline__ __m128i __DEFAULT_FN_ATTRS128 +static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_mask_cvtepu8_epi16(__m128i __W, __mmask8 __U, __m128i __A) { return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, @@ -1734,7 +1734,7 @@ _mm_mask_cvtepu8_epi16(__m128i __W, __mmask8 __U, __m128i __A) (__v8hi)__W); } -static __inline__ __m128i __DEFAULT_FN_ATTRS128 +static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_maskz_cvtepu8_epi16(__mmask8 __U, __m128i __A) { return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, @@ -1742,7 +1742,7 @@ 
_mm_maskz_cvtepu8_epi16(__mmask8 __U, __m128i __A) (__v8hi)_mm_setzero_si128()); } -static __inline__ __m256i __DEFAULT_FN_ATTRS256 +static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_mask_cvtepu8_epi16(__m256i __W, __mmask16 __U, __m128i __A) { return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, @@ -1750,7 +1750,7 @@ _mm256_mask_cvtepu8_epi16(__m256i __W, __mmask16 __U, __m128i __A) (__v16hi)__W); } -static __inline__ __m256i __DEFAULT_FN_ATTRS256 +static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_maskz_cvtepu8_epi16 (__mmask16 __U, __m128i __A) { return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, @@ -1877,7 +1877,7 @@ _mm256_maskz_sll_epi16(__mmask16 __U, __m256i __A, __m128i __B) (__v16hi)_mm256_setzero_si256()); } -static __inline__ __m128i __DEFAULT_FN_ATTRS128 +static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_mask_slli_epi16(__m128i __W, __mmask8 __U, __m128i __A, unsigned int __B) { return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, @@ -1885,7 +1885,7 @@ _mm_mask_slli_epi16(__m128i __W, __mmask8 __U, __m128i __A, unsigned int __B) (__v8hi)__W); } -static __inline__ __m128i __DEFAULT_FN_ATTRS128 +static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_maskz_slli_epi16 (__mmask8 __U, __m128i __A, unsigned int __B) { return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, @@ -2173,7 +2173,7 @@ _mm256_maskz_mov_epi8(__mmask32 __U, __m256i __A) { (__v32qi) _mm256_setzero_si256 ()); } -static __inline__ __m128i __DEFAULT_FN_ATTRS128 +static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_mask_set1_epi8 (__m128i __O, __mmask16 __M, char __A) { return (__m128i) __builtin_ia32_selectb_128(__M, @@ -2181,7 +2181,7 @@ _mm_mask_set1_epi8 (__m128i __O, __mmask16 __M, char __A) (__v16qi) __O); } -static __inline__ __m128i __DEFAULT_FN_ATTRS128 +static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_maskz_set1_epi8 (__mmask16 __M, char __A) { return (__m128i) __builtin_ia32_selectb_128(__M, @@ -2189,7 +2189,7 @@ _mm_maskz_set1_epi8 (__mmask16 __M, char __A) (__v16qi) _mm_setzero_si128()); } -static __inline__ __m256i __DEFAULT_FN_ATTRS256 +static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_mask_set1_epi8 (__m256i __O, __mmask32 __M, char __A) { return (__m256i) __builtin_ia32_selectb_256(__M, @@ -2197,7 +2197,7 @@ _mm256_mask_set1_epi8 (__m256i __O, __mmask32 __M, char __A) (__v32qi) __O); } -static __inline__ __m256i __DEFAULT_FN_ATTRS256 +static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_maskz_set1_epi8 (__mmask32 __M, char __A) { return (__m256i) __builtin_ia32_selectb_256(__M, @@ -2528,7 +2528,7 @@ _mm256_movm_epi16 (__mmask16 __A) return (__m256i) __builtin_ia32_cvtmask2w256 (__A); } -static __inline__ __m128i __DEFAULT_FN_ATTRS128 +static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_mask_broadcastb_epi8 (__m128i __O, __mmask16 __M, __m128i __A) { return (__m128i)__builtin_ia32_selectb_128(__M, @@ -2536,7 +2536,7 @@ _mm_mask_broadcastb_epi8 (__m128i __O, __mmask16 __M, __m128i __A) (__v16qi) __O); } -static __inline__ __m128i __DEFAULT_FN_ATTRS128 +static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_maskz_broadcastb_epi8 (__mmask16 __M, __m128i __A) { return (__m128i)__builtin_ia32_selectb_128(__M, @@ -2544,7 +2544,7 @@ _mm_maskz_broadcastb_epi8 (__mmask16 __M, __m128i __A) (__v16qi) _mm_setzero_si128()); } -static __inline__ __m256i __DEFAULT_FN_ATTRS256 +static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_mask_broadcastb_epi8 (__m256i __O, 
__mmask32 __M, __m128i __A) { return (__m256i)__builtin_ia32_selectb_256(__M, @@ -2552,7 +2552,7 @@ _mm256_mask_broadcastb_epi8 (__m256i __O, __mmask32 __M, __m128i __A) (__v32qi) __O); } -static __inline__ __m256i __DEFAULT_FN_ATTRS256 +static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_maskz_broadcastb_epi8 (__mmask32 __M, __m128i __A) { return (__m256i)__builtin_ia32_selectb_256(__M, @@ -2560,7 +2560,7 @@ _mm256_maskz_broadcastb_epi8 (__mmask32 __M, __m128i __A) (__v32qi) _mm256_setzero_si256()); } -static __inline__ __m128i __DEFAULT_FN_ATTRS128 +static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_mask_broadcastw_epi16 (__m128i __O, __mmask8 __M, __m128i __A) { return (__m128i)__builtin_ia32_selectw_128(__M, @@ -2568,7 +2568,7 @@ _mm_mask_broadcastw_epi16 (__m128i __O, __mmask8 __M, __m128i __A) (__v8hi) __O); } -static __inline__ __m128i __DEFAULT_FN_ATTRS128 +static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_maskz_broadcastw_epi16 (__mmask8 __M, __m128i __A) { return (__m128i)__builtin_ia32_selectw_128(__M, @@ -2576,7 +2576,7 @@ _mm_maskz_broadcastw_epi16 (__mmask8 __M, __m128i __A) (__v8hi) _mm_setzero_si128()); } -static __inline__ __m256i __DEFAULT_FN_ATTRS256 +static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_mask_broadcastw_epi16 (__m256i __O, __mmask16 __M, __m128i __A) { return (__m256i)__builtin_ia32_selectw_256(__M, @@ -2584,7 +2584,7 @@ _mm256_mask_broadcastw_epi16 (__m256i __O, __mmask16 __M, __m128i __A) (__v16hi) __O); } -static __inline__ __m256i __DEFAULT_FN_ATTRS256 +static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_maskz_broadcastw_epi16 (__mmask16 __M, __m128i __A) { return (__m256i)__builtin_ia32_selectw_256(__M, @@ -2592,7 +2592,7 @@ _mm256_maskz_broadcastw_epi16 (__mmask16 __M, __m128i __A) (__v16hi) _mm256_setzero_si256()); } -static __inline__ __m256i __DEFAULT_FN_ATTRS256 +static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_mask_set1_epi16 (__m256i __O, __mmask16 __M, short __A) { return (__m256i) __builtin_ia32_selectw_256 (__M, @@ -2600,7 +2600,7 @@ _mm256_mask_set1_epi16 (__m256i __O, __mmask16 __M, short __A) (__v16hi) __O); } -static __inline__ __m256i __DEFAULT_FN_ATTRS256 +static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_maskz_set1_epi16 (__mmask16 __M, short __A) { return (__m256i) __builtin_ia32_selectw_256(__M, @@ -2608,7 +2608,7 @@ _mm256_maskz_set1_epi16 (__mmask16 __M, short __A) (__v16hi) _mm256_setzero_si256()); } -static __inline__ __m128i __DEFAULT_FN_ATTRS128 +static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_mask_set1_epi16 (__m128i __O, __mmask8 __M, short __A) { return (__m128i) __builtin_ia32_selectw_128(__M, @@ -2616,7 +2616,7 @@ _mm_mask_set1_epi16 (__m128i __O, __mmask8 __M, short __A) (__v8hi) __O); } -static __inline__ __m128i __DEFAULT_FN_ATTRS128 +static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_maskz_set1_epi16 (__mmask8 __M, short __A) { return (__m128i) __builtin_ia32_selectw_128(__M, diff --git a/clang/lib/Headers/pmmintrin.h b/clang/lib/Headers/pmmintrin.h index 42bd343e326de..6b152bde29fc1 100644 --- a/clang/lib/Headers/pmmintrin.h +++ b/clang/lib/Headers/pmmintrin.h @@ -166,7 +166,7 @@ _mm_moveldup_ps(__m128 __a) /// A 128-bit vector of [2 x double] containing the right source operand. /// \returns A 128-bit vector of [2 x double] containing the alternating sums /// and differences of both operands. 
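The doc comment above describes the alternating subtract/add semantics of _mm_addsub_pd: the difference lands in the even element, the sum in the odd element. A minimal sketch of that behavior for reference; the driver below is not part of the patch and assumes SSE3 is enabled (e.g. via -msse3):

#include <pmmintrin.h>
#include <stdio.h>

int main(void) {
  __m128d a = _mm_set_pd(4.0, 1.0); /* element 0 = 1.0, element 1 = 4.0 */
  __m128d b = _mm_set_pd(3.0, 2.0); /* element 0 = 2.0, element 1 = 3.0 */
  /* Element 0 gets the difference, element 1 the sum:
     r[0] = a[0] - b[0] = -1.0, r[1] = a[1] + b[1] = 7.0 */
  __m128d r = _mm_addsub_pd(a, b);
  double out[2];
  _mm_storeu_pd(out, r);
  printf("%f %f\n", out[0], out[1]); /* prints -1.000000 7.000000 */
  return 0;
}

Note that the pmmintrin.h hunk below removes __DEFAULT_FN_ATTRS_CONSTEXPR from _mm_addsub_pd, the opposite direction of the avx512 hunks above, which move the masked select/move/broadcast intrinsics onto the constexpr-enabled attribute variants.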
-static __inline__ __m128d __DEFAULT_FN_ATTRS_CONSTEXPR +static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_addsub_pd(__m128d __a, __m128d __b) { return __builtin_ia32_addsubpd((__v2df)__a, (__v2df)__b); } diff --git a/clang/lib/Sema/SemaCXXScopeSpec.cpp b/clang/lib/Sema/SemaCXXScopeSpec.cpp index c52fc5bf815af..17ae7ca5627a9 100644 --- a/clang/lib/Sema/SemaCXXScopeSpec.cpp +++ b/clang/lib/Sema/SemaCXXScopeSpec.cpp @@ -780,6 +780,11 @@ bool Sema::BuildCXXNestedNameSpecifier(Scope *S, NestedNameSpecInfo &IdInfo, if (!Found.empty()) { const auto *ND = Found.getAsSingle<NamedDecl>(); + if (!ND) { + Diag(IdInfo.IdentifierLoc, diag::err_expected_class_or_namespace) + << IdInfo.Identifier << getLangOpts().CPlusPlus; + return true; + } if (::ExtendNestedNameSpecifier(*this, SS, ND, IdInfo.IdentifierLoc, IdInfo.CCLoc)) { const Type *T = SS.getScopeRep().getAsType(); diff --git a/clang/lib/Sema/SemaHLSL.cpp b/clang/lib/Sema/SemaHLSL.cpp index a06c57b15c585..e95fe16e6cb6c 100644 --- a/clang/lib/Sema/SemaHLSL.cpp +++ b/clang/lib/Sema/SemaHLSL.cpp @@ -3910,12 +3910,15 @@ void SemaHLSL::ActOnVariableDeclarator(VarDecl *VD) { if (VD->getType()->isHLSLIntangibleType()) collectResourceBindingsOnVarDecl(VD); - if (isResourceRecordTypeOrArrayOf(VD) || - VD->hasAttr()) { - // Make the variable for resources static. The global externally visible - // storage is accessed through the handle, which is a member. The variable - // itself is not externally visible. + if (VD->hasAttr()) VD->setStorageClass(StorageClass::SC_Static); + + if (isResourceRecordTypeOrArrayOf(VD) && + VD->getStorageClass() != SC_Static) { + // Add internal linkage attribute to non-static resource variables. The + // global externally visible storage is accessed through the handle, which + // is a member. The variable itself is not externally visible. + VD->addAttr(InternalLinkageAttr::CreateImplicit(getASTContext())); } // process explicit bindings diff --git a/clang/test/AST/HLSL/cbuffer.hlsl b/clang/test/AST/HLSL/cbuffer.hlsl index f3c6636232798..b0b5b989e36c2 100644 --- a/clang/test/AST/HLSL/cbuffer.hlsl +++ b/clang/test/AST/HLSL/cbuffer.hlsl @@ -153,7 +153,7 @@ cbuffer CB { static float SV; // CHECK: VarDecl {{.*}} s7 'EmptyStruct' callinit EmptyStruct s7; - // CHECK: VarDecl {{.*}} Buf 'RWBuffer<float>':'hlsl::RWBuffer<float>' static callinit + // CHECK: VarDecl {{.*}} Buf 'RWBuffer<float>':'hlsl::RWBuffer<float>' callinit RWBuffer<float> Buf; // CHECK: VarDecl {{.*}} ea 'EmptyArrayTypedef':'float[10][0]' EmptyArrayTypedef ea; diff --git a/clang/test/AST/HLSL/private.hlsl b/clang/test/AST/HLSL/private.hlsl index e00afb8f5cbd8..ba7380ec3cfda 100644 --- a/clang/test/AST/HLSL/private.hlsl +++ b/clang/test/AST/HLSL/private.hlsl @@ -3,7 +3,7 @@ // CHECK: VarDecl {{.*}} global_scalar 'hlsl_private int' static cinit static int global_scalar = 0; -// CHECK: VarDecl {{.*}} global_buffer 'RWBuffer<float>':'hlsl::RWBuffer<float>' static callinit +// CHECK: VarDecl {{.*}} global_buffer 'RWBuffer<float>':'hlsl::RWBuffer<float>' callinit RWBuffer<float> global_buffer; class A { diff --git a/clang/test/C/C2y/n3348.c b/clang/test/C/C2y/n3348.c new file mode 100644 index 0000000000000..e20c9f74883f9 --- /dev/null +++ b/clang/test/C/C2y/n3348.c @@ -0,0 +1,44 @@ +// RUN: %clang_cc1 -verify -std=c2y -Wall -pedantic %s + +/* WG14 N3348: No + * Matching of Multi-Dimensional Arrays in Generic Selection Expressions + * + * This allows use of * in a _Generic association as a placeholder for any size + * value. + * + * FIXME: Clang doesn't yet implement this paper.
When we do implement it, we + * should expose the functionality in earlier language modes (C89) for + * compatibility with GCC. + */ + +void test(int n, int m) { + static_assert(1 == _Generic(int[3][2], int[3][*]: 1, int[2][*]: 0)); /* expected-error {{star modifier used outside of function prototype}} + expected-error {{array has incomplete element type 'int[]'}} + */ + static_assert(1 == _Generic(int[3][2], int[*][2]: 1, int[*][3]: 0)); // expected-error {{star modifier used outside of function prototype}} + static_assert(1 == _Generic(int[3][n], int[3][*]: 1, int[2][*]: 0)); /* expected-error {{star modifier used outside of function prototype}} + expected-error {{array has incomplete element type 'int[]'}} + */ + static_assert(1 == _Generic(int[n][m], int[*][*]: 1, char[*][*]: 0)); /* expected-error 2 {{star modifier used outside of function prototype}} + expected-error {{array has incomplete element type 'int[]'}} + */ + static_assert(1 == _Generic(int(*)[2], int(*)[*]: 1)); // expected-error {{star modifier used outside of function prototype}} +} + +void questionable() { + // GCC accepts this despite the * appearing outside of a generic association, + // but it's not clear whether that's intentionally supported or an oversight. + // It gives a warning about * being used outside of a declaration, but not + // with an associated warning group. + static_assert(1 == _Generic(int[*][*], int[2][100]: 1)); /* expected-error 2 {{star modifier used outside of function prototype}} + expected-error {{array has incomplete element type 'int[]'}} + */ + // GCC claims this matches multiple associations, so the functionality seems + // like it may be intended to work? + (void)_Generic(int[*][*], /* expected-error 2 {{star modifier used outside of function prototype}} + expected-error {{array has incomplete element type 'int[]'}} + */ + int[2][100]: 1, + int[3][1000]: 2, + ); +} diff --git a/clang/test/CIR/CodeGen/agg-expr-lvalue.c b/clang/test/CIR/CodeGen/agg-expr-lvalue.c index c826f8fa829d0..509f0218e9912 100644 --- a/clang/test/CIR/CodeGen/agg-expr-lvalue.c +++ b/clang/test/CIR/CodeGen/agg-expr-lvalue.c @@ -95,16 +95,13 @@ void test_string_array_in_array(void) { } // CIR-LABEL: cir.func{{.*}} @test_string_array_in_array -// CIR: cir.alloca !cir.array<!cir.array<!s8i x 6> x 2>, {{.*}}, ["matrix", init] -// CIR: cir.get_global -// CIR: cir.copy -// CIR: cir.get_global -// CIR: cir.copy +// CIR: %[[MATRIX:.*]] = cir.alloca !cir.array<!cir.array<!s8i x 6> x 2>, {{.*}}, ["matrix", init] +// CIR: %[[CONST:.*]] = cir.const #cir.const_array<[#cir.const_array<[#cir.int<104> : !s8i, #cir.int<101> : !s8i, #cir.int<108> : !s8i, #cir.int<108> : !s8i, #cir.int<111> : !s8i, #cir.int<0> : !s8i]> : !cir.array<!s8i x 6>, #cir.const_array<[#cir.int<119> : !s8i, #cir.int<111> : !s8i, #cir.int<114> : !s8i, #cir.int<108> : !s8i, #cir.int<100> : !s8i, #cir.int<0> : !s8i]> : !cir.array<!s8i x 6>]> +// CIR: cir.store{{.*}} %[[CONST]], %[[MATRIX]] // LLVM-LABEL: define{{.*}} @test_string_array_in_array -// LLVM: alloca [2 x [6 x i8]] -// LLVM: call void @llvm.memcpy -// LLVM: call void @llvm.memcpy +// LLVM: %[[MATRIX:.*]] = alloca [2 x [6 x i8]] +// LLVM: store [2 x [6 x i8]] {{\[}}[6 x i8] c"hello\00", [6 x i8] c"world\00"], ptr %[[MATRIX]] // OGCG-LABEL: define{{.*}} @test_string_array_in_array // OGCG: alloca [2 x [6 x i8]] diff --git a/clang/test/CIR/CodeGen/array.cpp b/clang/test/CIR/CodeGen/array.cpp index 82add4b347e72..5e873810d494b 100644 --- a/clang/test/CIR/CodeGen/array.cpp +++ b/clang/test/CIR/CodeGen/array.cpp @@ -151,50 +151,12 @@ void func2() { } // CIR: %[[ARR2:.*]]
= cir.alloca !cir.array<!s32i x 2>, !cir.ptr<!cir.array<!s32i x 2>>, ["arr", init] -// CIR: %[[ARR_PTR:.*]] = cir.alloca !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>>, ["arrayinit.temp", init] -// CIR: %[[ARR_0:.*]] = cir.cast array_to_ptrdecay %[[ARR2]] : !cir.ptr<!cir.array<!s32i x 2>> -> !cir.ptr<!s32i> -// CIR: %[[FIVE:.*]] = cir.const #cir.int<5> : !s32i -// CIR: cir.store{{.*}} %[[FIVE]], %[[ARR_0]] : !s32i, !cir.ptr<!s32i> -// CIR: %[[OFFSET_0:.*]] = cir.const #cir.int<1> : !s64i -// CIR: %[[ELE_PTR:.*]] = cir.ptr_stride %[[ARR_0]], %[[OFFSET_0]] : (!cir.ptr<!s32i>, !s64i) -> !cir.ptr<!s32i> -// CIR: cir.store{{.*}} %[[ELE_PTR]], %[[ARR_PTR]] : !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>> -// CIR: %[[TWO:.*]] = cir.const #cir.int<2> : !s64i -// CIR: %[[ARR_END:.*]] = cir.ptr_stride %[[ARR_0]], %[[TWO]] : (!cir.ptr<!s32i>, !s64i) -> !cir.ptr<!s32i> -// CIR: cir.do { -// CIR: %[[ARR_CUR:.*]] = cir.load{{.*}} %[[ARR_PTR]] : !cir.ptr<!cir.ptr<!s32i>>, !cir.ptr<!s32i> -// CIR: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i -// CIR: cir.store{{.*}} %[[ZERO]], %[[ARR_CUR]] : !s32i, !cir.ptr<!s32i> -// CIR: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i -// CIR: %[[ARR_NEXT:.*]] = cir.ptr_stride %[[ARR_CUR]], %[[ONE]] : (!cir.ptr<!s32i>, !s64i) -> !cir.ptr<!s32i> -// CIR: cir.store{{.*}} %[[ARR_NEXT]], %[[ARR_PTR]] : !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>> -// CIR: cir.yield -// CIR: } while { -// CIR: %[[ARR_CUR:.*]] = cir.load{{.*}} %[[ARR_PTR]] : !cir.ptr<!cir.ptr<!s32i>>, !cir.ptr<!s32i> -// CIR: %[[CMP:.*]] = cir.cmp(ne, %[[ARR_CUR]], %[[ARR_END]]) : !cir.ptr<!s32i>, !cir.bool -// CIR: cir.condition(%[[CMP]]) -// CIR: } +// CIR: %[[CONST:.*]] = cir.const #cir.const_array<[#cir.int<5> : !s32i, #cir.int<0> : !s32i]> : !cir.array<!s32i x 2> +// CIR: cir.store{{.*}} %[[CONST]], %[[ARR2]] : !cir.array<!s32i x 2>, !cir.ptr<!cir.array<!s32i x 2>> // LLVM: define{{.*}} void @_Z5func2v(){{.*}} // LLVM: %[[ARR:.*]] = alloca [2 x i32], i64 1, align 4 -// LLVM: %[[TMP:.*]] = alloca ptr, i64 1, align 8 -// LLVM: %[[ARR_PTR:.*]] = getelementptr i32, ptr %[[ARR]], i32 0 -// LLVM: store i32 5, ptr %[[ARR_PTR]], align 4 -// LLVM: %[[ELE_1_PTR:.*]] = getelementptr i32, ptr %[[ARR_PTR]], i64 1 -// LLVM: store ptr %[[ELE_1_PTR]], ptr %[[TMP]], align 8 -// LLVM: %[[END_PTR:.*]] = getelementptr i32, ptr %[[ARR_PTR]], i64 2 -// LLVM: br label %[[LOOP_BODY:.*]] -// LLVM: [[LOOP_NEXT:.*]]: -// LLVM: %[[CUR:.*]] = load ptr, ptr %[[TMP]], align 8 -// LLVM: %[[CMP:.*]] = icmp ne ptr %[[CUR]], %[[END_PTR]] -// LLVM: br i1 %[[CMP]], label %[[LOOP_BODY]], label %[[LOOP_END:.*]] -// LLVM: [[LOOP_BODY]]: -// LLVM: %[[CUR:.*]] = load ptr, ptr %[[TMP]], align 8 -// LLVM: store i32 0, ptr %[[CUR]], align 4 -// LLVM: %[[NEXT:.*]] = getelementptr i32, ptr %[[CUR]], i64 1 -// LLVM: store ptr %[[NEXT]], ptr %[[TMP]], align 8 -// LLVM: br label %[[LOOP_NEXT:.*]] -// LLVM: [[LOOP_END]]: -// LLVM: ret void +// LLVM: store [2 x i32] [i32 5, i32 0], ptr %[[ARR]], align 4 // OGCG: %[[ARR:.*]] = alloca [2 x i32], align 4 // OGCG: call void @llvm.memcpy.p0.p0.i64(ptr align 4 %[[ARR]], ptr align 4 @[[FUN2_ARR]], i64 8, i1 false) @@ -209,13 +171,8 @@ void func3() { // CIR: %[[ARR:.*]] = cir.alloca !cir.array<!s32i x 2>, !cir.ptr<!cir.array<!s32i x 2>>, ["arr", init] // CIR: %[[IDX:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["idx", init] // CIR: %[[INIT:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["e", init] -// CIR: %[[ARR_PTR:.*]] = cir.cast array_to_ptrdecay %[[ARR]] : !cir.ptr<!cir.array<!s32i x 2>> -> !cir.ptr<!s32i> -// CIR: %[[V0:.*]] = cir.const #cir.int<5> : !s32i -// CIR: cir.store{{.*}} %[[V0]], %[[ARR_PTR]] : !s32i, !cir.ptr<!s32i> -// CIR: %[[OFFSET_0:.*]] = cir.const #cir.int<1> : !s64i -// CIR: %[[ELE_1_PTR:.*]] = cir.ptr_stride %[[ARR_PTR]], %[[OFFSET_0]] : (!cir.ptr<!s32i>, !s64i) -> !cir.ptr<!s32i> -// CIR: %[[V1:.*]] = cir.const #cir.int<6> : !s32i -// CIR: cir.store{{.*}} %[[V1]], %[[ELE_1_PTR]] :
!s32i, !cir.ptr<!s32i> +// CIR: %[[CONST:.*]] = cir.const #cir.const_array<[#cir.int<5> : !s32i, #cir.int<6> : !s32i]> : !cir.array<!s32i x 2> +// CIR: cir.store{{.*}} %[[CONST]], %[[ARR]] : !cir.array<!s32i x 2>, !cir.ptr<!cir.array<!s32i x 2>> // CIR: %[[IDX_V:.*]] = cir.const #cir.int<1> : !s32i // CIR: cir.store{{.*}} %[[IDX_V]], %[[IDX]] : !s32i, !cir.ptr<!s32i> // CIR: %[[TMP_IDX:.*]] = cir.load{{.*}} %[[IDX]] : !cir.ptr<!s32i>, !s32i @@ -228,10 +185,7 @@ // LLVM: %[[ARR:.*]] = alloca [2 x i32], i64 1, align 4 // LLVM: %[[IDX:.*]] = alloca i32, i64 1, align 4 // LLVM: %[[INIT:.*]] = alloca i32, i64 1, align 4 -// LLVM: %[[ARR_PTR:.*]] = getelementptr i32, ptr %[[ARR]], i32 0 -// LLVM: store i32 5, ptr %[[ARR_PTR]], align 4 -// LLVM: %[[ELE_1_PTR:.*]] = getelementptr i32, ptr %[[ARR_PTR]], i64 1 -// LLVM: store i32 6, ptr %[[ELE_1_PTR]], align 4 +// LLVM: store [2 x i32] [i32 5, i32 6], ptr %[[ARR]], align 4 // LLVM: store i32 1, ptr %[[IDX]], align 4 // LLVM: %[[TMP1:.*]] = load i32, ptr %[[IDX]], align 4 // LLVM: %[[ARR_PTR:.*]] = getelementptr i32, ptr %[[ARR]], i32 0 @@ -258,15 +212,8 @@ void func4() { } // CIR: %[[ARR:.*]] = cir.alloca !cir.array<!cir.array<!s32i x 1> x 2>, !cir.ptr<!cir.array<!cir.array<!s32i x 1> x 2>>, ["arr", init] // CIR: %[[INIT:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["e", init] -// CIR: %[[ARR_PTR:.*]] = cir.cast array_to_ptrdecay %[[ARR]] : !cir.ptr<!cir.array<!cir.array<!s32i x 1> x 2>> -> !cir.ptr<!cir.array<!s32i x 1>> -// CIR: %[[ARR_0_PTR:.*]] = cir.cast array_to_ptrdecay %[[ARR_PTR]] : !cir.ptr<!cir.array<!s32i x 1>> -> !cir.ptr<!s32i> -// CIR: %[[V_0_0:.*]] = cir.const #cir.int<5> : !s32i -// CIR: cir.store{{.*}} %[[V_0_0]], %[[ARR_0_PTR]] : !s32i, !cir.ptr<!s32i> -// CIR: %[[OFFSET:.*]] = cir.const #cir.int<1> : !s64i -// CIR: %[[ARR_1:.*]] = cir.ptr_stride %[[ARR_PTR]], %[[OFFSET]] : (!cir.ptr<!cir.array<!s32i x 1>>, !s64i) -> !cir.ptr<!cir.array<!s32i x 1>> -// CIR: %[[ARR_1_PTR:.*]] = cir.cast array_to_ptrdecay %[[ARR_1]] : !cir.ptr<!cir.array<!s32i x 1>> -> !cir.ptr<!s32i> -// CIR: %[[V_1_0:.*]] = cir.const #cir.int<6> : !s32i -// CIR: cir.store{{.*}} %[[V_1_0]], %[[ARR_1_PTR]] : !s32i, !cir.ptr<!s32i> +// CIR: %[[CONST:.*]] = cir.const #cir.const_array<[#cir.const_array<[#cir.int<5> : !s32i]> : !cir.array<!s32i x 1>, #cir.const_array<[#cir.int<6> : !s32i]> : !cir.array<!s32i x 1>]> : !cir.array<!cir.array<!s32i x 1> x 2> +// CIR: cir.store{{.*}} %[[CONST]], %[[ARR]] : !cir.array<!cir.array<!s32i x 1> x 2>, !cir.ptr<!cir.array<!cir.array<!s32i x 1> x 2>> // CIR: %[[IDX:.*]] = cir.const #cir.int<0> : !s32i // CIR: %[[IDX_1:.*]] = cir.const #cir.int<1> : !s32i // CIR: %[[ARR_PTR:.*]] = cir.cast array_to_ptrdecay %[[ARR]] : !cir.ptr<!cir.array<!cir.array<!s32i x 1> x 2>> -> !cir.ptr<!cir.array<!s32i x 1>> @@ -279,12 +226,7 @@ // LLVM: define{{.*}} void @_Z5func4v(){{.*}} // LLVM: %[[ARR:.*]] = alloca [2 x [1 x i32]], i64 1, align 4 // LLVM: %[[INIT:.*]] = alloca i32, i64 1, align 4 -// LLVM: %[[ARR_PTR:.*]] = getelementptr [1 x i32], ptr %[[ARR]], i32 0 -// LLVM: %[[ARR_0_0:.*]] = getelementptr i32, ptr %[[ARR_PTR]], i32 0 -// LLVM: store i32 5, ptr %[[ARR_0_0]], align 4 -// LLVM: %[[ARR_1:.*]] = getelementptr [1 x i32], ptr %[[ARR_PTR]], i64 1 -// LLVM: %[[ARR_1_0:.*]] = getelementptr i32, ptr %[[ARR_1]], i32 0 -// LLVM: store i32 6, ptr %[[ARR_1_0]], align 4 +// LLVM: store [2 x [1 x i32]] {{\[}}[1 x i32] [i32 5], [1 x i32] [i32 6]], ptr %[[ARR]], align 4 // LLVM: %[[ARR_PTR:.*]] = getelementptr [1 x i32], ptr %[[ARR]], i32 0 // LLVM: %[[ARR_1:.*]] = getelementptr [1 x i32], ptr %[[ARR_PTR]], i64 1 // LLVM: %[[ARR_1_0:.*]] = getelementptr i32, ptr %[[ARR_1]], i32 0 @@ -305,52 +247,12 @@ void func5() { } // CIR: %[[ARR:.*]] = cir.alloca !cir.array<!cir.array<!s32i x 1> x 2>, !cir.ptr<!cir.array<!cir.array<!s32i x 1> x 2>>, ["arr", init] -// CIR: %[[ARR_PTR:.*]] = cir.alloca !cir.ptr<!cir.array<!s32i x 1>>, !cir.ptr<!cir.ptr<!cir.array<!s32i x 1>>>, ["arrayinit.temp", init] -// CIR: %[[ARR_0:.*]] = cir.cast array_to_ptrdecay %0 : !cir.ptr<!cir.array<!cir.array<!s32i x 1> x 2>> -> !cir.ptr<!cir.array<!s32i x 1>> -//
CIR: %[[ARR_0_PTR:.*]] = cir.cast array_to_ptrdecay %[[ARR_0]] : !cir.ptr<!cir.array<!s32i x 1>> -> !cir.ptr<!s32i> -// CIR: %[[V_0_0:.*]] = cir.const #cir.int<5> : !s32i -// CIR: cir.store{{.*}} %[[V_0_0]], %[[ARR_0_PTR]] : !s32i, !cir.ptr<!s32i> -// CIR: %[[OFFSET:.*]] = cir.const #cir.int<1> : !s64i -// CIR: %[[ARR_1:.*]] = cir.ptr_stride %[[ARR_0]], %[[OFFSET]] : (!cir.ptr<!cir.array<!s32i x 1>>, !s64i) -> !cir.ptr<!cir.array<!s32i x 1>> -// CIR: cir.store{{.*}} %[[ARR_1]], %[[ARR_PTR]] : !cir.ptr<!cir.array<!s32i x 1>>, !cir.ptr<!cir.ptr<!cir.array<!s32i x 1>>> -// CIR: %[[TWO:.*]] = cir.const #cir.int<2> : !s64i -// CIR: %[[ARR_END:.*]] = cir.ptr_stride %[[ARR_0]], %[[TWO]] : (!cir.ptr<!cir.array<!s32i x 1>>, !s64i) -> !cir.ptr<!cir.array<!s32i x 1>> -// CIR: cir.do { -// CIR: %[[ARR_CUR:.*]] = cir.load{{.*}} %[[ARR_PTR]] : !cir.ptr<!cir.ptr<!cir.array<!s32i x 1>>>, !cir.ptr<!cir.array<!s32i x 1>> -// CIR: %[[ZERO:.*]] = cir.const #cir.zero : !cir.array<!s32i x 1> -// CIR: cir.store{{.*}} %[[ZERO]], %[[ARR_CUR]] : !cir.array<!s32i x 1>, !cir.ptr<!cir.array<!s32i x 1>> -// CIR: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i -// CIR: %[[ARR_NEXT:.*]] = cir.ptr_stride %[[ARR_CUR]], %[[ONE]] : (!cir.ptr<!cir.array<!s32i x 1>>, !s64i) -> !cir.ptr<!cir.array<!s32i x 1>> -// CIR: cir.store{{.*}} %[[ARR_NEXT]], %[[ARR_PTR]] : !cir.ptr<!cir.array<!s32i x 1>>, !cir.ptr<!cir.ptr<!cir.array<!s32i x 1>>> -// CIR: cir.yield -// CIR: } while { -// CIR: %[[ARR_CUR:.*]] = cir.load{{.*}} %[[ARR_PTR]] : !cir.ptr<!cir.ptr<!cir.array<!s32i x 1>>>, !cir.ptr<!cir.array<!s32i x 1>> -// CIR: %[[CMP:.*]] = cir.cmp(ne, %[[ARR_CUR]], %[[ARR_END]]) : !cir.ptr<!cir.array<!s32i x 1>>, !cir.bool -// CIR: cir.condition(%[[CMP]]) -// CIR: } +// CIR: %[[CONST:.*]] = cir.const #cir.const_array<[#cir.const_array<[#cir.int<5> : !s32i]> : !cir.array<!s32i x 1>, #cir.zero : !cir.array<!s32i x 1>]> : !cir.array<!cir.array<!s32i x 1> x 2> +// CIR: cir.store{{.*}} %[[CONST]], %[[ARR]] : !cir.array<!cir.array<!s32i x 1> x 2>, !cir.ptr<!cir.array<!cir.array<!s32i x 1> x 2>> // LLVM: define{{.*}} void @_Z5func5v(){{.*}} // LLVM: %[[ARR:.*]] = alloca [2 x [1 x i32]], i64 1, align 4 -// LLVM: %[[TMP:.*]] = alloca ptr, i64 1, align 8 -// LLVM: %[[ARR_PTR:.*]] = getelementptr [1 x i32], ptr %[[ARR]], i32 0 -// LLVM: %[[ARR_0:.*]] = getelementptr i32, ptr %[[ARR_PTR]], i32 0 -// LLVM: store i32 5, ptr %[[ARR_0]], align 4 -// LLVM: %[[ARR_1:.*]] = getelementptr [1 x i32], ptr %[[ARR_PTR]], i64 1 -// LLVM: store ptr %[[ARR_1]], ptr %[[TMP]], align 8 -// LLVM: %[[END_PTR:.*]] = getelementptr [1 x i32], ptr %[[ARR_PTR]], i64 2 -// LLVM: br label %[[LOOP_BODY:.*]] -// LLVM: [[LOOP_NEXT:.*]]: -// LLVM: %[[CUR:.*]] = load ptr, ptr %[[TMP]], align 8 -// LLVM: %[[CMP:.*]] = icmp ne ptr %[[CUR]], %[[END_PTR]] -// LLVM: br i1 %[[CMP]], label %[[LOOP_BODY]], label %[[LOOP_END:.*]] -// LLVM: [[LOOP_BODY]]: -// LLVM: %[[CUR:.*]] = load ptr, ptr %[[TMP]], align 8 -// LLVM: store [1 x i32] zeroinitializer, ptr %[[CUR]], align 4 -// LLVM: %[[NEXT:.*]] = getelementptr [1 x i32], ptr %[[CUR]], i64 1 -// LLVM: store ptr %[[NEXT]], ptr %[[TMP]], align 8 -// LLVM: br label %[[LOOP_NEXT:.*]] -// LLVM: [[LOOP_END]]: -// LLVM: ret void +// LLVM: store [2 x [1 x i32]] {{\[}}[1 x i32] [i32 5], [1 x i32] zeroinitializer], ptr %[[ARR]], align 4 // ORGC: %[[ARR:.*]] = alloca [2 x [1 x i32]], align 4 // ORGC: call void @llvm.memcpy.p0.p0.i64(ptr align 4 %[[ARR]], ptr align 4 @[[FUN5_ARR]], i64 8, i1 false) @@ -395,44 +297,12 @@ void func7() { } // CIR: %[[ARR:.*]] = cir.alloca !cir.array<!cir.ptr<!s32i> x 1>, !cir.ptr<!cir.array<!cir.ptr<!s32i> x 1>>, ["arr", init] -// CIR: %[[ARR_PTR:.*]] = cir.alloca !cir.ptr<!cir.ptr<!s32i>>, !cir.ptr<!cir.ptr<!cir.ptr<!s32i>>>, ["arrayinit.temp", init] -// CIR: %[[ARR_0:.*]] = cir.cast array_to_ptrdecay %[[ARR]] : !cir.ptr<!cir.array<!cir.ptr<!s32i> x 1>> -> !cir.ptr<!cir.ptr<!s32i>> -// CIR: cir.store{{.*}} %[[ARR_0]], %[[ARR_PTR]] : !cir.ptr<!cir.ptr<!s32i>>, !cir.ptr<!cir.ptr<!cir.ptr<!s32i>>> -// CIR: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i -// CIR: %[[ARR_END:.*]] = cir.ptr_stride %[[ARR_0]], %[[ONE]] : (!cir.ptr<!cir.ptr<!s32i>>, !s64i) -> !cir.ptr<!cir.ptr<!s32i>> -// CIR: cir.do { -// CIR: %[[ARR_CUR:.*]] = cir.load{{.*}} %[[ARR_PTR]] : !cir.ptr<!cir.ptr<!cir.ptr<!s32i>>>,
!cir.ptr<!cir.ptr<!s32i>> -// CIR: %[[NULL_PTR:.*]] = cir.const #cir.ptr<null> : !cir.ptr<!s32i> -// CIR: cir.store{{.*}} %[[NULL_PTR]], %[[ARR_CUR]] : !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>> -// CIR: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i -// CIR: %[[ARR_NEXT:.*]] = cir.ptr_stride %[[ARR_CUR]], %[[ONE]] : (!cir.ptr<!cir.ptr<!s32i>>, !s64i) -> !cir.ptr<!cir.ptr<!s32i>> -// CIR: cir.store{{.*}} %[[ARR_NEXT]], %[[ARR_PTR]] : !cir.ptr<!cir.ptr<!s32i>>, !cir.ptr<!cir.ptr<!cir.ptr<!s32i>>> -// CIR: cir.yield -// CIR: } while { -// CIR: %[[ARR_CUR:.*]] = cir.load{{.*}} %[[ARR_PTR]] : !cir.ptr<!cir.ptr<!cir.ptr<!s32i>>>, !cir.ptr<!cir.ptr<!s32i>> -// CIR: %[[CMP:.*]] = cir.cmp(ne, %[[ARR_CUR]], %[[ARR_END]]) : !cir.ptr<!cir.ptr<!s32i>>, !cir.bool -// CIR: cir.condition(%[[CMP]]) -// CIR: } +// CIR: %[[CONST:.*]] = cir.const #cir.zero : !cir.array<!cir.ptr<!s32i> x 1> +// CIR: cir.store{{.*}} %[[CONST]], %[[ARR]] : !cir.array<!cir.ptr<!s32i> x 1>, !cir.ptr<!cir.array<!cir.ptr<!s32i> x 1>> // LLVM: define{{.*}} void @_Z5func7v(){{.*}} // LLVM: %[[ARR:.*]] = alloca [1 x ptr], i64 1, align 8 -// LLVM: %[[TMP:.*]] = alloca ptr, i64 1, align 8 -// LLVM: %[[ARR_PTR:.*]] = getelementptr ptr, ptr %[[ARR]], i32 0 -// LLVM: store ptr %[[ARR_PTR]], ptr %[[TMP]], align 8 -// LLVM: %[[END_PTR:.*]] = getelementptr ptr, ptr %[[ARR_PTR]], i64 1 -// LLVM: br label %[[LOOP_BODY:.*]] -// LLVM: [[LOOP_NEXT:.*]]: -// LLVM: %[[CUR:.*]] = load ptr, ptr %[[TMP]], align 8 -// LLVM: %[[CMP:.*]] = icmp ne ptr %[[CUR]], %[[END_PTR]] -// LLVM: br i1 %[[CMP]], label %[[LOOP_BODY]], label %[[LOOP_END:.*]] -// LLVM: [[LOOP_BODY]]: -// LLVM: %[[CUR:.*]] = load ptr, ptr %[[TMP]], align 8 -// LLVM: store ptr null, ptr %[[CUR]], align 8 -// LLVM: %[[NEXT:.*]] = getelementptr ptr, ptr %[[CUR]], i64 1 -// LLVM: store ptr %[[NEXT]], ptr %[[TMP]], align 8 -// LLVM: br label %[[LOOP_NEXT:.*]] -// LLVM: [[LOOP_END]]: -// LLVM: ret void +// LLVM: store [1 x ptr] zeroinitializer, ptr %[[ARR]], align 8 // OGCG: %[[ARR:.*]] = alloca [1 x ptr], align 8 // OGCG: call void @llvm.memset.p0.i64(ptr align 8 %[[ARR]], i8 0, i64 8, i1 false) @@ -581,19 +451,11 @@ void array_with_complex_elements() { } // CIR: %[[ARR_ADDR:.*]] = cir.alloca !cir.array<!cir.complex<!cir.float> x 2>, !cir.ptr<!cir.array<!cir.complex<!cir.float> x 2>>, ["arr", init] -// CIR: %[[ARR_0:.*]] = cir.cast array_to_ptrdecay %[[ARR_ADDR]] : !cir.ptr<!cir.array<!cir.complex<!cir.float> x 2>> -> !cir.ptr<!cir.complex<!cir.float>> -// CIR: %[[CONST_COMPLEX_0:.*]] = cir.const #cir.const_complex<#cir.fp<1.100000e+00> : !cir.float, #cir.fp<2.200000e+00> : !cir.float> : !cir.complex<!cir.float> -// CIR: cir.store{{.*}} %[[CONST_COMPLEX_0]], %[[ARR_0]] : !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>> -// CIR: %[[IDX_1:.*]] = cir.const #cir.int<1> : !s64i -// CIR: %[[ARR_1:.*]] = cir.ptr_stride %1, %[[IDX_1]] : (!cir.ptr<!cir.complex<!cir.float>>, !s64i) -> !cir.ptr<!cir.complex<!cir.float>> -// CIR: %[[CONST_COMPLEX_1:.*]] = cir.const #cir.const_complex<#cir.fp<3.300000e+00> : !cir.float, #cir.fp<4.400000e+00> : !cir.float> : !cir.complex<!cir.float> -// CIR: cir.store{{.*}} %[[CONST_COMPLEX_1]], %[[ARR_1]] : !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>> +// CIR: %[[CONST:.*]] = cir.const #cir.const_array<[#cir.const_complex<#cir.fp<1.100000e+00> : !cir.float, #cir.fp<2.200000e+00> : !cir.float> : !cir.complex<!cir.float>, #cir.const_complex<#cir.fp<3.300000e+00> : !cir.float, #cir.fp<4.400000e+00> : !cir.float> : !cir.complex<!cir.float>]> : !cir.array<!cir.complex<!cir.float> x 2> +// CIR: cir.store{{.*}} %[[CONST]], %[[ARR_ADDR]] : !cir.array<!cir.complex<!cir.float> x 2>, !cir.ptr<!cir.array<!cir.complex<!cir.float> x 2>> // LLVM: %[[ARR_ADDR:.*]] = alloca [2 x { float, float }], i64 1, align 16 -// LLVM: %[[ARR_0:.*]] = getelementptr { float, float }, ptr %[[ARR_ADDR]], i32 0 -// LLVM: store { float, float } { float 0x3FF19999A0000000, float 0x40019999A0000000 }, ptr %[[ARR_0]], align 8 -// LLVM: %[[ARR_1:.*]] = getelementptr { float, float }, ptr %[[ARR_0]], i64 1 -// LLVM: store { float, float } { float 0x400A666660000000, float 0x40119999A0000000 },
ptr %[[ARR_1]], align 8 +// LLVM: store [2 x { float, float }] [{ float, float } { float 0x3FF19999A0000000, float 0x40019999A0000000 }, { float, float } { float 0x400A666660000000, float 0x40119999A0000000 }], ptr %[[ARR_ADDR]], align 16 // OGCG: %[[ARR_ADDR:.*]] = alloca [2 x { float, float }], align 16 // OGCG: call void @llvm.memcpy.p0.p0.i64(ptr align 16 %[[ARR_ADDR]], ptr align 16 @__const._Z27array_with_complex_elementsv.arr, i64 16, i1 false) diff --git a/clang/test/CIR/CodeGen/compound_literal.cpp b/clang/test/CIR/CodeGen/compound_literal.cpp index 30a1dc03c449b..5219710d3e8bc 100644 --- a/clang/test/CIR/CodeGen/compound_literal.cpp +++ b/clang/test/CIR/CodeGen/compound_literal.cpp @@ -79,17 +79,17 @@ void foo3() { } // CIR: %[[A_ADDR:.*]] = cir.alloca !cir.vector<4 x !s32i>, !cir.ptr<!cir.vector<4 x !s32i>>, ["a", init] -// CIR: %[[CL_ADDR:.*]] = cir.alloca !cir.vector<4 x !s32i>, !cir.ptr<!cir.vector<4 x !s32i>>, [".compoundliteral", init] // CIR: %[[VEC:.*]] = cir.const #cir.const_vector<[#cir.int<10> : !s32i, #cir.int<20> : !s32i, #cir.int<30> : !s32i, #cir.int<40> : !s32i]> : !cir.vector<4 x !s32i> -// CIR: cir.store{{.*}} %[[VEC]], %[[CL_ADDR]] : !cir.vector<4 x !s32i>, !cir.ptr<!cir.vector<4 x !s32i>> -// CIR: %[[TMP:.*]] = cir.load{{.*}} %[[CL_ADDR]] : !cir.ptr<!cir.vector<4 x !s32i>>, !cir.vector<4 x !s32i> -// CIR: cir.store{{.*}} %[[TMP]], %[[A_ADDR]] : !cir.vector<4 x !s32i>, !cir.ptr<!cir.vector<4 x !s32i>> +// CIR: cir.store{{.*}} %[[VEC]], %[[A_ADDR]] : !cir.vector<4 x !s32i>, !cir.ptr<!cir.vector<4 x !s32i>> // LLVM: %[[A_ADDR:.*]] = alloca <4 x i32>, i64 1, align 16 -// LLVM: %[[CL_ADDR:.*]] = alloca <4 x i32>, i64 1, align 16 -// LLVM: store <4 x i32> <i32 10, i32 20, i32 30, i32 40>, ptr %[[CL_ADDR]], align 16 -// LLVM: %[[TMP:.*]] = load <4 x i32>, ptr %[[CL_ADDR]], align 16 -// LLVM: store <4 x i32> %[[TMP]], ptr %[[A_ADDR]], align 16 +// LLVM: store <4 x i32> <i32 10, i32 20, i32 30, i32 40>, ptr %[[A_ADDR]], align 16 + +// FIXME: OGCG emits a temporary compound literal in this case because it omits +// vector types from the check for aggregate constants in +// EmitAutoVarAlloca. This looks like an oversight in OGCG because the +// code to emit a constant in EmitStoresForConstant specifically looks +// for vector types in OGCG.
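For context, a minimal reproduction of the vector compound-literal pattern that the FIXME above discusses (a sketch only; the typedef name vi4 is assumed and may not match the actual test file):

typedef int vi4 __attribute__((vector_size(16)));

void foo3() {
  /* The compound literal initializes 'a'. The updated CHECK lines verify
     that ClangIR now stores the constant vector straight into 'a' rather
     than materializing a separate ".compoundliteral" temporary first. */
  vi4 a = (vi4){10, 20, 30, 40};
}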
// OGCG: %[[A_ADDR:.*]] = alloca <4 x i32>, align 16 // OGCG: %[[CL_ADDR:.*]] = alloca <4 x i32>, align 16 @@ -107,19 +107,12 @@ void foo4() { // CIR-LABEL: @_Z4foo4v // CIR: %[[P:.*]] = cir.alloca !rec_Point, !cir.ptr, ["p", init] -// CIR: %[[P_X:.*]] = cir.get_member %[[P]][0] {name = "x"} -// CIR: %[[FIVE:.*]] = cir.const #cir.int<5> : !s32i -// CIR: cir.store{{.*}} %[[FIVE]], %[[P_X]] -// CIR: %[[P_Y:.*]] = cir.get_member %[[P]][1] {name = "y"} -// CIR: %[[TEN:.*]] = cir.const #cir.int<10> : !s32i -// CIR: cir.store{{.*}} %[[TEN]], %[[P_Y]] +// CIR: %[[CONST:.*]] = cir.const #cir.const_record<{#cir.int<5> : !s32i, #cir.int<10> : !s32i}> : !rec_Point +// CIR: cir.store{{.*}} %[[CONST]], %[[P]] : !rec_Point, !cir.ptr // LLVM-LABEL: @_Z4foo4v // LLVM: %[[P:.*]] = alloca %struct.Point -// LLVM: %[[P_X:.*]] = getelementptr %struct.Point, ptr %[[P]], i32 0, i32 0 -// LLVM: store i32 5, ptr %[[P_X]] -// LLVM: %[[P_Y:.*]] = getelementptr %struct.Point, ptr %[[P]], i32 0, i32 1 -// LLVM: store i32 10, ptr %[[P_Y]] +// LLVM: store %struct.Point { i32 5, i32 10 }, ptr %[[P]], align 4 // OGCG-LABEL: @_Z4foo4v // OGCG: %[[P:.*]] = alloca %struct.Point diff --git a/clang/test/CIR/CodeGen/loop.cpp b/clang/test/CIR/CodeGen/loop.cpp index 3d286664bba85..463434c38a1af 100644 --- a/clang/test/CIR/CodeGen/loop.cpp +++ b/clang/test/CIR/CodeGen/loop.cpp @@ -312,23 +312,10 @@ void l5() { // CIR: %[[BEGIN_ADDR:.*]] = cir.alloca {{.*}} ["__begin1", init] // CIR: %[[END_ADDR:.*]] = cir.alloca {{.*}} ["__end1", init] // CIR: %[[X_ADDR:.*]] = cir.alloca {{.*}} ["x", init] -// CIR: %[[ARR_CAST:.*]] = cir.cast array_to_ptrdecay %[[ARR_ADDR]] : {{.*}} -// CIR: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i -// CIR: cir.store{{.*}} %[[ONE]], %[[ARR_CAST]] -// CIR: %[[OFFSET1:.*]] = cir.const #cir.int<1> : !s64i -// CIR: %[[STRIDE:.*]] = cir.ptr_stride %[[ARR_CAST]], %[[OFFSET1]] : ({{.*}}, {{.*}}) -// CIR: %[[TWO:.*]] = cir.const #cir.int<2> : !s32i -// CIR: cir.store{{.*}} %[[TWO]], %[[STRIDE]] -// CIR: %[[OFFSET2:.*]] = cir.const #cir.int<2> : !s64i -// CIR: %[[STRIDE2:.*]] = cir.ptr_stride %[[ARR_CAST]], %[[OFFSET2]] : ({{.*}}, {{.*}}) -// CIR: %[[THREE:.*]] = cir.const #cir.int<3> : !s32i -// CIR: cir.store{{.*}} %[[THREE]], %[[STRIDE2]] -// CIR: %[[OFFSET3:.*]] = cir.const #cir.int<3> : !s64i -// CIR: %[[STRIDE3:.*]] = cir.ptr_stride %[[ARR_CAST]], %[[OFFSET3]] : ({{.*}}, {{.*}}) -// CIR: %[[FOUR:.*]] = cir.const #cir.int<4> : !s32i -// CIR: cir.store{{.*}} %[[FOUR]], %[[STRIDE3]] +// CIR: %[[ARR_INIT:.*]] = cir.const #cir.const_array<[#cir.int<1> : !s32i, #cir.int<2> : !s32i, #cir.int<3> : !s32i, #cir.int<4> : !s32i]> +// CIR: cir.store{{.*}} %[[ARR_INIT]], %[[ARR_ADDR]] // CIR: cir.store{{.*}} %[[ARR_ADDR]], %[[RANGE_ADDR]] -// CIR: %[[RANGE_LOAD:.*]] = cir.load{{.*}} %[[RANGE_ADDR]] +// CIR: %[[RANGE_LOAD:.*]] = cir.load %[[RANGE_ADDR]] // CIR: %[[RANGE_CAST:.*]] = cir.cast array_to_ptrdecay %[[RANGE_LOAD]] : {{.*}} // CIR: cir.store{{.*}} %[[RANGE_CAST]], %[[BEGIN_ADDR]] // CIR: %[[BEGIN:.*]] = cir.load{{.*}} %[[RANGE_ADDR]] @@ -363,14 +350,7 @@ void l5() { // LLVM: %[[X_ADDR:.*]] = alloca i32 // LLVM: br label %[[SETUP:.*]] // LLVM: [[SETUP]]: -// LLVM: %[[ARR_0:.*]] = getelementptr i32, ptr %[[ARR_ADDR]], i32 0 -// LLVM: store i32 1, ptr %[[ARR_0]] -// LLVM: %[[ARR_1:.*]] = getelementptr i32, ptr %[[ARR_0]], i64 1 -// LLVM: store i32 2, ptr %[[ARR_1]] -// LLVM: %[[ARR_2:.*]] = getelementptr i32, ptr %[[ARR_0]], i64 2 -// LLVM: store i32 3, ptr %[[ARR_2]] -// LLVM: %[[ARR_3:.*]] = getelementptr i32, ptr 
%[[ARR_0]], i64 3 -// LLVM: store i32 4, ptr %[[ARR_3]] +// LLVM: store [4 x i32] [i32 1, i32 2, i32 3, i32 4], ptr %[[ARR_ADDR]] // LLVM: store ptr %[[ARR_ADDR]], ptr %[[RANGE_ADDR]] // LLVM: %[[BEGIN:.*]] = load ptr, ptr %[[RANGE_ADDR]] // LLVM: %[[BEGIN_CAST:.*]] = getelementptr i32, ptr %[[BEGIN]], i32 0 diff --git a/clang/test/CIR/CodeGen/paren-init-list.cpp b/clang/test/CIR/CodeGen/paren-init-list.cpp index 0efa36352899e..a5676e2b31667 100644 --- a/clang/test/CIR/CodeGen/paren-init-list.cpp +++ b/clang/test/CIR/CodeGen/paren-init-list.cpp @@ -13,18 +13,11 @@ struct CompleteS { void cxx_paren_list_init_expr() { CompleteS a(1, 'a'); } // CIR: %[[A_ADDR:.*]] = cir.alloca !rec_CompleteS, !cir.ptr, ["a", init] -// CIR: %[[ELEM_0_PTR:.*]] = cir.get_member %[[A_ADDR]][0] {name = "a"} : !cir.ptr -> !cir.ptr -// CIR: %[[ELEM_0_VAL:.*]] = cir.const #cir.int<1> : !s32i -// CIR: cir.store{{.*}} %[[ELEM_0_VAL]], %[[ELEM_0_PTR]] : !s32i, !cir.ptr -// CIR: %[[ELEM_1_PTR:.*]] = cir.get_member %[[A_ADDR]][1] {name = "b"} : !cir.ptr -> !cir.ptr -// CIR: %[[ELEM_1_VAL:.*]] = cir.const #cir.int<97> : !s8i -// CIR: cir.store{{.*}} %[[ELEM_1_VAL]], %[[ELEM_1_PTR]] : !s8i, !cir.ptr +// CIR: %[[CONST:.*]] = cir.const #cir.const_record<{#cir.int<1> : !s32i, #cir.int<97> : !s8i}> : !rec_CompleteS +// CIR: cir.store{{.*}} %[[CONST]], %[[A_ADDR]] // LLVM: %[[A_ADDR:.*]] = alloca %struct.CompleteS, i64 1, align 4 -// LLVM: %[[ELEM_0_PTR:.*]] = getelementptr %struct.CompleteS, ptr %[[A_ADDR]], i32 0, i32 0 -// LLVM: store i32 1, ptr %[[ELEM_0_PTR]], align 4 -// LLVM: %[[ELEM_1_PTR:.*]] = getelementptr %struct.CompleteS, ptr %[[A_ADDR]], i32 0, i32 1 -// LLVM: store i8 97, ptr %[[ELEM_1_PTR]], align 4 +// LLVM: store %struct.CompleteS { i32 1, i8 97 }, ptr %[[A_ADDR]], align 4 // OGCG: %[[A_ADDR:.*]] = alloca %struct.CompleteS, align 4 // OGCG: call void @llvm.memcpy.p0.p0.i64(ptr align 4 %[[A_ADDR]], ptr align 4 @__const._Z24cxx_paren_list_init_exprv.a, i64 8, i1 false) diff --git a/clang/test/CIR/CodeGen/statement-exprs.c b/clang/test/CIR/CodeGen/statement-exprs.c index 2d05fc13ff6e1..f917334ade829 100644 --- a/clang/test/CIR/CodeGen/statement-exprs.c +++ b/clang/test/CIR/CodeGen/statement-exprs.c @@ -233,9 +233,8 @@ int test3() { return ({ struct S s = {1}; s; }).x; } // CIR: %[[TMP:.+]] = cir.alloca !rec_S, !cir.ptr, ["tmp"] // CIR: cir.scope { // CIR: %[[S:.+]] = cir.alloca !rec_S, !cir.ptr, ["s", init] -// CIR: %[[GEP_X_S:.+]] = cir.get_member %[[S]][0] {name = "x"} : !cir.ptr -> !cir.ptr -// CIR: %[[C1:.+]] = cir.const #cir.int<1> : !s32i -// CIR: cir.store {{.*}} %[[C1]], %[[GEP_X_S]] : !s32i, !cir.ptr +// CIR: %[[CONST:.*]] = cir.const #cir.const_record<{#cir.int<1> : !s32i}> : !rec_S +// CIR: cir.store{{.*}} %[[CONST]], %[[S]] : !rec_S, !cir.ptr // CIR: cir.copy %[[S]] to %[[REF_TMP0]] : !cir.ptr // CIR: } // CIR: %[[GEP_X_TMP:.+]] = cir.get_member %[[REF_TMP0]][0] {name = "x"} : !cir.ptr -> !cir.ptr @@ -254,8 +253,7 @@ int test3() { return ({ struct S s = {1}; s; }).x; } // LLVM: [[LBL5]]: // LLVM: br label %[[LBL6:.+]] // LLVM: [[LBL6]]: -// LLVM: %[[GEP_S:.+]] = getelementptr %struct.S, ptr %[[VAR3]], i32 0, i32 0 -// LLVM: store i32 1, ptr %[[GEP_S]] +// LLVM: store %struct.S { i32 1 }, ptr %[[VAR3]] // LLVM: call void @llvm.memcpy.p0.p0.i32(ptr %[[VAR1]], ptr %[[VAR3]], i32 4, i1 false) // LLVM: br label %[[LBL8:.+]] // LLVM: [[LBL8]]: diff --git a/clang/test/CIR/CodeGen/struct-init.cpp b/clang/test/CIR/CodeGen/struct-init.cpp index 79886190616b9..8f146684ffb10 100644 --- 
a/clang/test/CIR/CodeGen/struct-init.cpp +++ b/clang/test/CIR/CodeGen/struct-init.cpp @@ -65,41 +65,16 @@ void init() { // CIR: cir.func{{.*}} @_Z4initv() // CIR: %[[S1:.*]] = cir.alloca !rec_S, !cir.ptr, ["s1", init] // CIR: %[[S2:.*]] = cir.alloca !rec_S, !cir.ptr, ["s2", init] -// CIR: %[[S1_A:.*]] = cir.get_member %[[S1]][0] {name = "a"} -// CIR: %[[ONE:.*]] = cir.const #cir.int<1> -// CIR: cir.store{{.*}} %[[ONE]], %[[S1_A]] -// CIR: %[[S1_B:.*]] = cir.get_member %[[S1]][1] {name = "b"} -// CIR: %[[TWO:.*]] = cir.const #cir.int<2> -// CIR: cir.store{{.*}} %[[TWO]], %[[S1_B]] -// CIR: %[[S1_C:.*]] = cir.get_member %[[S1]][2] {name = "c"} -// CIR: %[[THREE:.*]] = cir.const #cir.int<3> -// CIR: cir.store{{.*}} %[[THREE]], %[[S1_C]] -// CIR: %[[S2_A:.*]] = cir.get_member %[[S2]][0] {name = "a"} -// CIR: %[[FOUR:.*]] = cir.const #cir.int<4> -// CIR: cir.store{{.*}} %[[FOUR]], %[[S2_A]] -// CIR: %[[S2_B:.*]] = cir.get_member %[[S2]][1] {name = "b"} -// CIR: %[[FIVE:.*]] = cir.const #cir.int<5> -// CIR: cir.store{{.*}} %[[FIVE]], %[[S2_B]] -// CIR: %[[S2_C:.*]] = cir.get_member %[[S2]][2] {name = "c"} -// CIR: %[[ZERO:.*]] = cir.const #cir.int<0> -// CIR: cir.store{{.*}} %[[ZERO]], %[[S2_C]] -// CIR: cir.return +// CIR: %[[CONST_1:.*]] = cir.const #cir.const_record<{#cir.int<1> : !s32i, #cir.int<2> : !s32i, #cir.int<3> : !s32i}> : !rec_S +// CIR: cir.store{{.*}} %[[CONST_1]], %[[S1]] +// CIR: %[[CONST_2:.*]] = cir.const #cir.const_record<{#cir.int<4> : !s32i, #cir.int<5> : !s32i, #cir.int<0> : !s32i}> : !rec_S +// CIR: cir.store{{.*}} %[[CONST_2]], %[[S2]] // LLVM: define{{.*}} void @_Z4initv() // LLVM: %[[S1:.*]] = alloca %struct.S // LLVM: %[[S2:.*]] = alloca %struct.S -// LLVM: %[[S1_A:.*]] = getelementptr %struct.S, ptr %[[S1]], i32 0, i32 0 -// LLVM: store i32 1, ptr %[[S1_A]] -// LLVM: %[[S1_B:.*]] = getelementptr %struct.S, ptr %[[S1]], i32 0, i32 1 -// LLVM: store i32 2, ptr %[[S1_B]] -// LLVM: %[[S1_C:.*]] = getelementptr %struct.S, ptr %[[S1]], i32 0, i32 2 -// LLVM: store i32 3, ptr %[[S1_C]] -// LLVM: %[[S2_A:.*]] = getelementptr %struct.S, ptr %[[S2]], i32 0, i32 0 -// LLVM: store i32 4, ptr %[[S2_A]] -// LLVM: %[[S2_B:.*]] = getelementptr %struct.S, ptr %[[S2]], i32 0, i32 1 -// LLVM: store i32 5, ptr %[[S2_B]] -// LLVM: %[[S2_C:.*]] = getelementptr %struct.S, ptr %[[S2]], i32 0, i32 2 -// LLVM: store i32 0, ptr %[[S2_C]] +// LLVM: store %struct.S { i32 1, i32 2, i32 3 }, ptr %[[S1]], align 4 +// LLVM: store %struct.S { i32 4, i32 5, i32 0 }, ptr %[[S2]], align 4 // OGCG: @__const._Z4initv.s1 = private unnamed_addr constant %struct.S { i32 1, i32 2, i32 3 } // OGCG: @__const._Z4initv.s2 = private unnamed_addr constant %struct.S { i32 4, i32 5, i32 0 } diff --git a/clang/test/CIR/CodeGen/struct.cpp b/clang/test/CIR/CodeGen/struct.cpp index ee543001025e7..c15e7e7c57b9f 100644 --- a/clang/test/CIR/CodeGen/struct.cpp +++ b/clang/test/CIR/CodeGen/struct.cpp @@ -107,21 +107,14 @@ void paren_expr() { // CIR: cir.func{{.*}} @_Z10paren_exprv() // CIR: %[[A_ADDR:.*]] = cir.alloca !rec_Point, !cir.ptr, ["a", init] // CIR: %[[B_ADDR:.*]] = cir.alloca !rec_Point, !cir.ptr, ["b", init] -// CIR: %[[X_ELEM_PTR:.*]] = cir.get_member %[[A_ADDR]][0] {name = "x"} : !cir.ptr -> !cir.ptr -// CIR: %[[CONST_0:.*]] = cir.const #cir.int<0> : !s32i -// CIR: cir.store{{.*}} %[[CONST_0]], %[[X_ELEM_PTR]] : !s32i, !cir.ptr -// CIR: %[[Y_ELEM_PTR:.*]] = cir.get_member %[[A_ADDR]][1] {name = "y"} : !cir.ptr -> !cir.ptr -// CIR: %[[CONST_0:.*]] = cir.const #cir.int<0> : !s32i -// CIR: cir.store{{.*}} 
%[[CONST_0]], %[[Y_ELEM_PTR]] : !s32i, !cir.ptr +// CIR: %[[CONST:.*]] = cir.const #cir.zero : !rec_Point +// CIR: cir.store{{.*}} %[[CONST]], %[[A_ADDR]] : !rec_Point, !cir.ptr // CIR: cir.call @_ZZ10paren_exprvEN5PointC1ERKS_(%[[B_ADDR]], %[[A_ADDR]]) nothrow : (!cir.ptr, !cir.ptr) -> () // LLVM: define{{.*}} void @_Z10paren_exprv() // LLVM: %[[A_ADDR:.*]] = alloca %struct.Point, i64 1, align 4 // LLVM: %[[B_ADDR:.*]] = alloca %struct.Point, i64 1, align 4 -// LLVM: %[[X_ELEM_PTR:.*]] = getelementptr %struct.Point, ptr %[[A_ADDR]], i32 0, i32 0 -// LLVM: store i32 0, ptr %[[X_ELEM_PTR]], align 4 -// LLVM: %[[Y_ELEM_PTR:.*]] = getelementptr %struct.Point, ptr %[[A_ADDR]], i32 0, i32 1 -// LLVM: store i32 0, ptr %[[Y_ELEM_PTR]], align 4 +// LLVM: store %struct.Point zeroinitializer, ptr %[[A_ADDR]], align 4 // LLVM: call void @_ZZ10paren_exprvEN5PointC1ERKS_(ptr %[[B_ADDR]], ptr %[[A_ADDR]]) // OGCG: define{{.*}} void @_Z10paren_exprv() @@ -265,16 +258,11 @@ void bin_comma() { // CIR: cir.func{{.*}} @_Z9bin_commav() // CIR: %[[A_ADDR:.*]] = cir.alloca !rec_CompleteS, !cir.ptr, ["a", init] -// CIR: %[[TMP_ADDR:.*]] = cir.alloca !rec_CompleteS, !cir.ptr, ["agg.tmp.ensured"] -// CIR: %[[ZERO:.*]] = cir.const #cir.zero : !rec_CompleteS -// CIR: cir.store{{.*}} %[[ZERO]], %[[TMP_ADDR]] : !rec_CompleteS, !cir.ptr -// CIR: %[[ZERO:.*]] = cir.const #cir.zero : !rec_CompleteS -// CIR: cir.store{{.*}} %[[ZERO]], %[[A_ADDR]] : !rec_CompleteS, !cir.ptr +// CIR: %[[CONST:.*]] = cir.const #cir.zero : !rec_CompleteS +// CIR: cir.store{{.*}} %[[CONST]], %[[A_ADDR]] : !rec_CompleteS, !cir.ptr // LLVM: define{{.*}} void @_Z9bin_commav() // LLVM: %[[A_ADDR:.*]] = alloca %struct.CompleteS, i64 1, align 4 -// LLVM: %[[TMP_ADDR:.*]] = alloca %struct.CompleteS, i64 1, align 4 -// LLVM: store %struct.CompleteS zeroinitializer, ptr %[[TMP_ADDR]], align 4 // LLVM: store %struct.CompleteS zeroinitializer, ptr %[[A_ADDR]], align 4 // OGCG: define{{.*}} void @_Z9bin_commav() @@ -284,20 +272,13 @@ void bin_comma() { void compound_literal_expr() { CompleteS a = (CompleteS){}; } // CIR: %[[A_ADDR:.*]] = cir.alloca !rec_CompleteS, !cir.ptr, ["a", init] -// CIR: %[[A_ELEM_0_PTR:.*]] = cir.get_member %[[A_ADDR]][0] {name = "a"} : !cir.ptr -> !cir.ptr -// CIR: %[[CONST_0:.*]] = cir.const #cir.int<0> : !s32i -// CIR: cir.store{{.*}} %[[CONST_0]], %[[A_ELEM_0_PTR]] : !s32i, !cir.ptr -// CIR: %[[A_ELEM_1_PTR:.*]] = cir.get_member %[[A_ADDR]][1] {name = "b"} : !cir.ptr -> !cir.ptr -// CIR: %[[CONST_0:.*]] = cir.const #cir.int<0> : !s8i -// CIR: cir.store{{.*}} %[[CONST_0]], %[[A_ELEM_1_PTR]] : !s8i, !cir.ptr +// CIR: %[[CONST:.*]] = cir.const #cir.zero : !rec_CompleteS +// CIR: cir.store{{.*}} %[[CONST]], %[[A_ADDR]] : !rec_CompleteS, !cir.ptr // TODO(cir): zero-initialize the padding // LLVM: %[[A_ADDR:.*]] = alloca %struct.CompleteS, i64 1, align 4 -// LLVM: %[[A_ELEM_0_PTR:.*]] = getelementptr %struct.CompleteS, ptr %[[A_ADDR]], i32 0, i32 0 -// LLVM: store i32 0, ptr %[[A_ELEM_0_PTR]], align 4 -// LLVM: %[[A_ELEM_1_PTR:.*]] = getelementptr %struct.CompleteS, ptr %[[A_ADDR]], i32 0, i32 1 -// LLVM: store i8 0, ptr %[[A_ELEM_1_PTR]], align 4 +// LLVM: store %struct.CompleteS zeroinitializer, ptr %[[A_ADDR]], align 4 // OGCG: %[[A_ADDR:.*]] = alloca %struct.CompleteS, align 4 // OGCG: call void @llvm.memset.p0.i64(ptr align 4 %[[A_ADDR]], i8 0, i64 8, i1 false) diff --git a/clang/test/CIR/CodeGen/variable-decomposition.cpp b/clang/test/CIR/CodeGen/variable-decomposition.cpp index ba59109ab625f..f0e19263cd6db 100644 --- 
a/clang/test/CIR/CodeGen/variable-decomposition.cpp +++ b/clang/test/CIR/CodeGen/variable-decomposition.cpp @@ -19,12 +19,8 @@ float function() { // CIR-LABEL: cir.func dso_local @_Z8functionv() -> !cir.float // CIR: %[[RETVAL:.+]] = cir.alloca !cir.float, !cir.ptr, ["__retval"] // CIR: %[[STRUCT:.+]] = cir.alloca !rec_some_struct, !cir.ptr, ["", init] -// CIR: %[[MEMBER_A:.+]] = cir.get_member %[[STRUCT]][0] {name = "a"} : !cir.ptr -> !cir.ptr -// CIR: %[[CONST_1:.+]] = cir.const #cir.int<1> : !s32i -// CIR: cir.store{{.*}} %[[CONST_1]], %[[MEMBER_A]] -// CIR: %[[MEMBER_B:.+]] = cir.get_member %[[STRUCT]][1] {name = "b"} : !cir.ptr -> !cir.ptr -// CIR: %[[TWO_FP:.+]] = cir.const #cir.fp<2.000000e+00> : !cir.float -// CIR: cir.store{{.*}} %[[TWO_FP]], %[[MEMBER_B]] +// CIR: %[[CONST:.+]] = cir.const #cir.const_record<{#cir.int<1> : !s32i, #cir.fp<2.000000e+00> : !cir.float}> : !rec_some_struct +// CIR: cir.store{{.*}} %[[CONST]], %[[STRUCT]] // CIR: %[[MEMBER_A:.+]] = cir.get_member %[[STRUCT]][0] {name = "a"} : !cir.ptr -> !cir.ptr // CIR: %[[LOAD_A:.+]] = cir.load align(4) %[[MEMBER_A]] : !cir.ptr, !s32i // CIR: %[[CAST_A:.+]] = cir.cast int_to_float %[[LOAD_A]] : !s32i -> !cir.float @@ -38,10 +34,7 @@ float function() { // LLVM-LABEL: define dso_local float @_Z8functionv() // LLVM: %[[RETVAL:.+]] = alloca float, i64 1 // LLVM: %[[STRUCT:.+]] = alloca %struct.some_struct, i64 1 -// LLVM: %[[GEP_A:.+]] = getelementptr %struct.some_struct, ptr %[[STRUCT]], i32 0, i32 0 -// LLVM: store i32 1, ptr %[[GEP_A]] -// LLVM: %[[GEP_B:.+]] = getelementptr %struct.some_struct, ptr %[[STRUCT]], i32 0, i32 1 -// LLVM: store float 2.000000e+00, ptr %[[GEP_B]] +// LLVM: store %struct.some_struct { i32 1, float 2.000000e+00 }, ptr %[[STRUCT]] // LLVM: %[[GEP_A:.+]] = getelementptr %struct.some_struct, ptr %[[STRUCT]], i32 0, i32 0 // LLVM: %[[LOAD_A:.+]] = load i32, ptr %[[GEP_A]] // LLVM: %[[CAST_A:.+]] = sitofp i32 %[[LOAD_A]] to float diff --git a/clang/test/CIR/CodeGenOpenACC/combined-reduction-clause-default-ops.cpp b/clang/test/CIR/CodeGenOpenACC/combined-reduction-clause-default-ops.cpp index 53eba7bafb312..d289336ccaf8c 100644 --- a/clang/test/CIR/CodeGenOpenACC/combined-reduction-clause-default-ops.cpp +++ b/clang/test/CIR/CodeGenOpenACC/combined-reduction-clause-default-ops.cpp @@ -1,4 +1,5 @@ -// RUN: %clang_cc1 -fopenacc -triple x86_64-linux-gnu -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -triple x86_64-linux-pc %s -o - | FileCheck %s +// RUN: %clang_cc1 -fopenacc -triple x86_64-linux-gnu -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -triple x86_64-linux-pc %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s struct DefaultOperators { int i; @@ -24,21 +25,8 @@ void acc_combined() { // CHECK: acc.reduction.recipe @reduction_add__ZTS16DefaultOperators : !cir.ptr reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_DefaultOperators, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name 
= "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][3] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][4] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #false -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.zero : !rec_DefaultOperators +// CHECK-NEXT: cir.store{{.*}} %[[ZERO]], %[[ALLOCA]] : !rec_DefaultOperators, !cir.ptr // CHECK-NEXT: acc.yield // // CHECK-NEXT: } combiner { @@ -83,21 +71,8 @@ void acc_combined() { // CHECK-NEXT: acc.reduction.recipe @reduction_mul__ZTS16DefaultOperators : !cir.ptr reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_DefaultOperators, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][3] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][4] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: %[[CONST:.*]] = cir.const #cir.const_record<{#cir.int<1> : !s32i, #cir.int<1> : !u32i, #cir.fp<1{{.*}}> : !cir.float, #cir.fp<1{{.*}}> : !cir.double, #true}> : !rec_DefaultOperators +// CHECK-NEXT: cir.store{{.*}} %[[CONST]], %[[ALLOCA]] : !rec_DefaultOperators, !cir.ptr // CHECK-NEXT: acc.yield // // CHECK-NEXT: } combiner { @@ -142,21 +117,8 @@ void acc_combined() { // CHECK-NEXT: acc.reduction.recipe @reduction_max__ZTS16DefaultOperators : !cir.ptr reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_DefaultOperators, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<0> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name 
= "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-3.4{{.*}}E+38> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][3] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-1.7{{.*}}E+308> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][4] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #false -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: %[[CONST:.*]] = cir.const #cir.const_record<{#cir.int<-2147483648> : !s32i, #cir.int<0> : !u32i, #cir.fp<-3.4{{.*}}E+38> : !cir.float, #cir.fp<-1.7{{.*}}E+308> : !cir.double, #false}> : !rec_DefaultOperators +// CHECK-NEXT: cir.store{{.*}} %[[CONST]], %[[ALLOCA]] : !rec_DefaultOperators, !cir.ptr // CHECK-NEXT: acc.yield // // CHECK-NEXT: } combiner { @@ -240,21 +202,8 @@ void acc_combined() { // CHECK-NEXT: acc.reduction.recipe @reduction_min__ZTS16DefaultOperators : !cir.ptr reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_DefaultOperators, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<2147483647> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<4294967295> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][3] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<1.7{{.*}}E+308> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][4] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: %[[CONST:.*]] = cir.const #cir.const_record<{#cir.int<2147483647> : !s32i, #cir.int<4294967295> : !u32i, #cir.fp<3.4{{.*}}E+38> : !cir.float, #cir.fp<1.7{{.*}}E+308> : !cir.double, #true}> : !rec_DefaultOperators +// CHECK-NEXT: cir.store{{.*}} %[[CONST]], %[[ALLOCA]] : !rec_DefaultOperators, !cir.ptr // CHECK-NEXT: acc.yield // // CHECK-NEXT: } combiner { @@ -338,15 +287,8 @@ void acc_combined() { // CHECK-NEXT: acc.reduction.recipe @reduction_iand__ZTS24DefaultOperatorsNoFloats : !cir.ptr reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_DefaultOperatorsNoFloats, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_I]] : !s32i, !cir.ptr -// 
CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<4294967295> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][2] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: %[[CONST:.*]] = cir.const #cir.const_record<{#cir.int<-1> : !s32i, #cir.int<4294967295> : !u32i, #true}> : !rec_DefaultOperatorsNoFloats +// CHECK-NEXT: cir.store{{.*}} %[[CONST]], %[[ALLOCA]] : !rec_DefaultOperatorsNoFloats, !cir.ptr // CHECK-NEXT: acc.yield // // CHECK-NEXT: } combiner { @@ -379,15 +321,8 @@ void acc_combined() { // CHECK-NEXT: acc.reduction.recipe @reduction_ior__ZTS24DefaultOperatorsNoFloats : !cir.ptr reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_DefaultOperatorsNoFloats, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][2] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #false -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.zero : !rec_DefaultOperatorsNoFloats +// CHECK-NEXT: cir.store{{.*}} %[[ZERO]], %[[ALLOCA]] : !rec_DefaultOperatorsNoFloats, !cir.ptr // CHECK-NEXT: acc.yield // // CHECK-NEXT: } combiner { @@ -420,15 +355,8 @@ void acc_combined() { // CHECK-NEXT: acc.reduction.recipe @reduction_xor__ZTS24DefaultOperatorsNoFloats : !cir.ptr reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_DefaultOperatorsNoFloats, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][2] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #false -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.zero : !rec_DefaultOperatorsNoFloats +// CHECK-NEXT: cir.store{{.*}} %[[ZERO]], %[[ALLOCA]] : !rec_DefaultOperatorsNoFloats, !cir.ptr // CHECK-NEXT: acc.yield // // CHECK-NEXT: } combiner { @@ -461,21 +389,8 @@ void acc_combined() { // CHECK-NEXT: acc.reduction.recipe @reduction_land__ZTS16DefaultOperators : !cir.ptr reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_DefaultOperators, 
!cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][3] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][4] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: %[[CONST:.*]] = cir.const #cir.const_record<{#cir.int<1> : !s32i, #cir.int<1> : !u32i, #cir.fp<1{{.*}}> : !cir.float, #cir.fp<1{{.*}}> : !cir.double, #true}> : !rec_DefaultOperators +// CHECK-NEXT: cir.store{{.*}} %[[CONST]], %[[ALLOCA]] : !rec_DefaultOperators, !cir.ptr // CHECK-NEXT: acc.yield // // CHECK-NEXT: } combiner { @@ -558,21 +473,8 @@ void acc_combined() { // CHECK-NEXT: acc.reduction.recipe @reduction_lor__ZTS16DefaultOperators : !cir.ptr reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_DefaultOperators, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][3] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][4] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #false -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.zero : !rec_DefaultOperators +// CHECK-NEXT: cir.store{{.*}} %[[ZERO]], %[[ALLOCA]] : !rec_DefaultOperators, !cir.ptr // CHECK-NEXT: acc.yield // // CHECK-NEXT: } combiner { @@ -656,37 +558,8 @@ void acc_combined() { // CHECK-NEXT: acc.reduction.recipe @reduction_add__ZTSA5_16DefaultOperators : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, 
["openacc.reduction.init", init] -// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["arrayinit.temp"] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr -// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i -// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride %[[DECAY]], %[[LAST_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr -// CHECK-NEXT: cir.do { -// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[TEMP_LOAD]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[TEMP_LOAD]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[TEMP_LOAD]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[TEMP_LOAD]][3] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[TEMP_LOAD]][4] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #false -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_B]] : !cir.bool, !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i -// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride %[[TEMP_LOAD]], %[[ONE]] : (!cir.ptr, !s64i) -> !cir.ptr -// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: cir.yield -// CHECK-NEXT: } while { -// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr, !cir.bool -// CHECK-NEXT: cir.condition(%[[CMP]]) -// CHECK-NEXT: } +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.zero : !cir.array +// CHECK-NEXT: cir.store{{.*}} %[[ZERO]], %[[ALLOCA]] : !cir.array, !cir.ptr> // CHECK-NEXT: acc.yield // // CHECK-NEXT: } combiner { @@ -754,96 +627,8 @@ void acc_combined() { // CHECK-NEXT: acc.reduction.recipe @reduction_mul__ZTSA5_16DefaultOperators : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[DECAY]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[DECAY]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[DECAY]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float -// 
CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[DECAY]][3] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[DECAY]][4] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr -// -// CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[ONE_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][3] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr -// -// CHECK-NEXT: %[[TWO_IDX:.*]] = cir.const #cir.int<2> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[TWO_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][3] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr -// -// -// CHECK-NEXT: %[[THREE_IDX:.*]] = cir.const #cir.int<3> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[THREE_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: 
%[[ONE:.*]] = cir.const #cir.int<1> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][3] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr -// -// CHECK-NEXT: %[[FOUR_IDX:.*]] = cir.const #cir.int<4> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[FOUR_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][3] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr -// +// CHECK-NEXT: %[[CONST:.*]] = cir.const #cir.const_array<[#cir.const_record<{#cir.int<1> : !s32i, #cir.int<1> : !u32i, #cir.fp<1{{.*}}> : !cir.float, #cir.fp<1{{.*}}> : !cir.double, #true}> : !rec_DefaultOperators, #cir.const_record<{#cir.int<1> : !s32i, #cir.int<1> : !u32i, #cir.fp<1{{.*}}> : !cir.float, #cir.fp<1{{.*}}> : !cir.double, #true}> : !rec_DefaultOperators, #cir.const_record<{#cir.int<1> : !s32i, #cir.int<1> : !u32i, #cir.fp<1{{.*}}> : !cir.float, #cir.fp<1{{.*}}> : !cir.double, #true}> : !rec_DefaultOperators, #cir.const_record<{#cir.int<1> : !s32i, #cir.int<1> : !u32i, #cir.fp<1{{.*}}> : !cir.float, #cir.fp<1{{.*}}> : !cir.double, #true}> : !rec_DefaultOperators, #cir.const_record<{#cir.int<1> : !s32i, #cir.int<1> : !u32i, #cir.fp<1{{.*}}> : !cir.float, #cir.fp<1{{.*}}> : !cir.double, #true}> : !rec_DefaultOperators]> : !cir.array +// CHECK-NEXT: cir.store{{.*}} %[[CONST]], %[[ALLOCA]] : !cir.array, !cir.ptr> // CHECK-NEXT: acc.yield // // CHECK-NEXT: } combiner { @@ -912,96 +697,8 @@ void acc_combined() { // CHECK-NEXT: acc.reduction.recipe @reduction_max__ZTSA5_16DefaultOperators : 
!cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[DECAY]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[DECAY]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<0> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[DECAY]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-3.4{{.*}}E+38> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[DECAY]][3] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-1.7{{.*}}E+308> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[DECAY]][4] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #false -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_B]] : !cir.bool, !cir.ptr -// -// CHECK-NEXT: %[[LEAST_IDX:.*]] = cir.const #cir.int<1> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[LEAST_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<0> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-3.4{{.*}}E+38> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][3] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-1.7{{.*}}E+308> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #false -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_B]] : !cir.bool, !cir.ptr -// -// CHECK-NEXT: %[[TWO_IDX:.*]] = cir.const #cir.int<2> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[TWO_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<0> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_U]] : !u32i, 
!cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-3.4{{.*}}E+38> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][3] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-1.7{{.*}}E+308> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #false -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_B]] : !cir.bool, !cir.ptr -// -// -// CHECK-NEXT: %[[THREE_IDX:.*]] = cir.const #cir.int<3> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[THREE_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<0> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-3.4{{.*}}E+38> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][3] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-1.7{{.*}}E+308> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #false -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_B]] : !cir.bool, !cir.ptr -// -// CHECK-NEXT: %[[FOUR_IDX:.*]] = cir.const #cir.int<4> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[FOUR_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<0> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-3.4{{.*}}E+38> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][3] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-1.7{{.*}}E+308> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #false -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_B]] : 
!cir.bool, !cir.ptr -// +// CHECK-NEXT: %[[CONST:.*]] = cir.const #cir.const_array<[#cir.const_record<{#cir.int<-2147483648> : !s32i, #cir.int<0> : !u32i, #cir.fp<-3.4{{.*}}E+38> : !cir.float, #cir.fp<-1.7{{.*}}E+308> : !cir.double, #false}> : !rec_DefaultOperators, #cir.const_record<{#cir.int<-2147483648> : !s32i, #cir.int<0> : !u32i, #cir.fp<-3.4{{.*}}E+38> : !cir.float, #cir.fp<-1.7{{.*}}E+308> : !cir.double, #false}> : !rec_DefaultOperators, #cir.const_record<{#cir.int<-2147483648> : !s32i, #cir.int<0> : !u32i, #cir.fp<-3.4{{.*}}E+38> : !cir.float, #cir.fp<-1.7{{.*}}E+308> : !cir.double, #false}> : !rec_DefaultOperators, #cir.const_record<{#cir.int<-2147483648> : !s32i, #cir.int<0> : !u32i, #cir.fp<-3.4{{.*}}E+38> : !cir.float, #cir.fp<-1.7{{.*}}E+308> : !cir.double, #false}> : !rec_DefaultOperators, #cir.const_record<{#cir.int<-2147483648> : !s32i, #cir.int<0> : !u32i, #cir.fp<-3.4{{.*}}E+38> : !cir.float, #cir.fp<-1.7{{.*}}E+308> : !cir.double, #false}> : !rec_DefaultOperators]> : !cir.array +// CHECK-NEXT: cir.store{{.*}} %[[CONST]], %[[ALLOCA]] : !cir.array, !cir.ptr> // CHECK-NEXT: acc.yield // // CHECK-NEXT: } combiner { @@ -1107,96 +804,8 @@ void acc_combined() { // CHECK-NEXT: acc.reduction.recipe @reduction_min__ZTSA5_16DefaultOperators : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[DECAY]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<2147483647> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[DECAY]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<4294967295> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[DECAY]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[DECAY]][3] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<1.7{{.*}}E+308> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[DECAY]][4] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_B]] : !cir.bool, !cir.ptr -// -// CHECK-NEXT: %[[LARGEST_IDX:.*]] = cir.const #cir.int<1> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[LARGEST_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<2147483647> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<4294967295> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : 
!cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][3] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<1.7{{.*}}E+308> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_B]] : !cir.bool, !cir.ptr -// -// CHECK-NEXT: %[[TWO_IDX:.*]] = cir.const #cir.int<2> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[TWO_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<2147483647> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<4294967295> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][3] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<1.7{{.*}}E+308> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_B]] : !cir.bool, !cir.ptr -// -// -// CHECK-NEXT: %[[THREE_IDX:.*]] = cir.const #cir.int<3> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[THREE_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<2147483647> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<4294967295> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][3] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<1.7{{.*}}E+308> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_B]] : !cir.bool, !cir.ptr -// -// 
CHECK-NEXT: %[[FOUR_IDX:.*]] = cir.const #cir.int<4> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[FOUR_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<2147483647> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<4294967295> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][3] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<1.7{{.*}}E+308> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_B]] : !cir.bool, !cir.ptr -// +// CHECK-NEXT: %[[CONST:.*]] = cir.const #cir.const_array<[#cir.const_record<{#cir.int<2147483647> : !s32i, #cir.int<4294967295> : !u32i, #cir.fp<3.4{{.*}}E+38> : !cir.float, #cir.fp<1.7{{.*}}E+308> : !cir.double, #true}> : !rec_DefaultOperators, #cir.const_record<{#cir.int<2147483647> : !s32i, #cir.int<4294967295> : !u32i, #cir.fp<3.4{{.*}}E+38> : !cir.float, #cir.fp<1.7{{.*}}E+308> : !cir.double, #true}> : !rec_DefaultOperators, #cir.const_record<{#cir.int<2147483647> : !s32i, #cir.int<4294967295> : !u32i, #cir.fp<3.4{{.*}}E+38> : !cir.float, #cir.fp<1.7{{.*}}E+308> : !cir.double, #true}> : !rec_DefaultOperators, #cir.const_record<{#cir.int<2147483647> : !s32i, #cir.int<4294967295> : !u32i, #cir.fp<3.4{{.*}}E+38> : !cir.float, #cir.fp<1.7{{.*}}E+308> : !cir.double, #true}> : !rec_DefaultOperators, #cir.const_record<{#cir.int<2147483647> : !s32i, #cir.int<4294967295> : !u32i, #cir.fp<3.4{{.*}}E+38> : !cir.float, #cir.fp<1.7{{.*}}E+308> : !cir.double, #true}> : !rec_DefaultOperators]> : !cir.array +// CHECK-NEXT: cir.store{{.*}} %[[CONST]], %[[ALLOCA]] : !cir.array, !cir.ptr> // CHECK-NEXT: acc.yield // // CHECK-NEXT: } combiner { @@ -1302,66 +911,8 @@ void acc_combined() { // CHECK-NEXT: acc.reduction.recipe @reduction_iand__ZTSA5_24DefaultOperatorsNoFloats : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[DECAY]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[DECAY]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<4294967295> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[DECAY]][2] {name = "b"} : 
!cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_B]] : !cir.bool, !cir.ptr -// -// CHECK-NEXT: %[[ALL_ONES_IDX:.*]] = cir.const #cir.int<1> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[ALL_ONES_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<4294967295> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_B]] : !cir.bool, !cir.ptr -// -// CHECK-NEXT: %[[TWO_IDX:.*]] = cir.const #cir.int<2> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[TWO_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<4294967295> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_B]] : !cir.bool, !cir.ptr -// -// -// CHECK-NEXT: %[[THREE_IDX:.*]] = cir.const #cir.int<3> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[THREE_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<4294967295> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_B]] : !cir.bool, !cir.ptr -// -// CHECK-NEXT: %[[FOUR_IDX:.*]] = cir.const #cir.int<4> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[FOUR_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<4294967295> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_U]] : !u32i, !cir.ptr -// 
CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_B]] : !cir.bool, !cir.ptr -// +// CHECK-NEXT: %[[CONST:.*]] = cir.const #cir.const_array<[#cir.const_record<{#cir.int<-1> : !s32i, #cir.int<4294967295> : !u32i, #true}> : !rec_DefaultOperatorsNoFloats, #cir.const_record<{#cir.int<-1> : !s32i, #cir.int<4294967295> : !u32i, #true}> : !rec_DefaultOperatorsNoFloats, #cir.const_record<{#cir.int<-1> : !s32i, #cir.int<4294967295> : !u32i, #true}> : !rec_DefaultOperatorsNoFloats, #cir.const_record<{#cir.int<-1> : !s32i, #cir.int<4294967295> : !u32i, #true}> : !rec_DefaultOperatorsNoFloats, #cir.const_record<{#cir.int<-1> : !s32i, #cir.int<4294967295> : !u32i, #true}> : !rec_DefaultOperatorsNoFloats]> : !cir.array +// CHECK-NEXT: cir.store{{.*}} %[[CONST]], %[[ALLOCA]] : !cir.array, !cir.ptr> // CHECK-NEXT: acc.yield // // CHECK-NEXT: } combiner { @@ -1417,32 +968,8 @@ void acc_combined() { // CHECK-NEXT: acc.reduction.recipe @reduction_ior__ZTSA5_24DefaultOperatorsNoFloats : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["arrayinit.temp"] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr -// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i -// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride %[[DECAY]], %[[LAST_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr -// CHECK-NEXT: cir.do { -// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[TEMP_LOAD]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[TEMP_LOAD]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[TEMP_LOAD]][2] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #false -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_B]] : !cir.bool, !cir.ptr -// -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i -// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride %[[TEMP_LOAD]], %[[ONE]] : (!cir.ptr, !s64i) -> !cir.ptr -// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: cir.yield -// CHECK-NEXT: } while { -// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr, !cir.bool -// CHECK-NEXT: cir.condition(%[[CMP]]) -// CHECK-NEXT: } +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.zero : !cir.array +// CHECK-NEXT: cir.store{{.*}} %[[ZERO]], %[[ALLOCA]] : !cir.array, !cir.ptr> // CHECK-NEXT: acc.yield // // CHECK-NEXT: } combiner { @@ -1498,31 +1025,8 @@ void acc_combined() { // CHECK-NEXT: acc.reduction.recipe @reduction_xor__ZTSA5_24DefaultOperatorsNoFloats : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: 
%[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["arrayinit.temp"] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr -// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i -// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride %[[DECAY]], %[[LAST_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr -// CHECK-NEXT: cir.do { -// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[TEMP_LOAD]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[TEMP_LOAD]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[TEMP_LOAD]][2] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #false -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_B]] : !cir.bool, !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i -// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride %[[TEMP_LOAD]], %[[ONE]] : (!cir.ptr, !s64i) -> !cir.ptr -// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: cir.yield -// CHECK-NEXT: } while { -// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr, !cir.bool -// CHECK-NEXT: cir.condition(%[[CMP]]) -// CHECK-NEXT: } +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.zero : !cir.array +// CHECK-NEXT: cir.store{{.*}} %[[ZERO]], %[[ALLOCA]] : !cir.array, !cir.ptr> // CHECK-NEXT: acc.yield // // CHECK-NEXT: } combiner { @@ -1578,96 +1082,8 @@ void acc_combined() { // CHECK-NEXT: acc.reduction.recipe @reduction_land__ZTSA5_16DefaultOperators : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[DECAY]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[DECAY]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[DECAY]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[DECAY]][3] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[DECAY]][4] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = 
cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr -// -// CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[ONE_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][3] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr -// -// CHECK-NEXT: %[[TWO_IDX:.*]] = cir.const #cir.int<2> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[TWO_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][3] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr -// -// -// CHECK-NEXT: %[[THREE_IDX:.*]] = cir.const #cir.int<3> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[THREE_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr 
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][3] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr -// -// CHECK-NEXT: %[[FOUR_IDX:.*]] = cir.const #cir.int<4> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[FOUR_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][3] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr -// +// CHECK-NEXT: %[[CONST:.*]] = cir.const #cir.const_array<[#cir.const_record<{#cir.int<1> : !s32i, #cir.int<1> : !u32i, #cir.fp<1{{.*}}> : !cir.float, #cir.fp<1{{.*}}> : !cir.double, #true}> : !rec_DefaultOperators, #cir.const_record<{#cir.int<1> : !s32i, #cir.int<1> : !u32i, #cir.fp<1{{.*}}> : !cir.float, #cir.fp<1{{.*}}> : !cir.double, #true}> : !rec_DefaultOperators, #cir.const_record<{#cir.int<1> : !s32i, #cir.int<1> : !u32i, #cir.fp<1{{.*}}> : !cir.float, #cir.fp<1{{.*}}> : !cir.double, #true}> : !rec_DefaultOperators, #cir.const_record<{#cir.int<1> : !s32i, #cir.int<1> : !u32i, #cir.fp<1{{.*}}> : !cir.float, #cir.fp<1{{.*}}> : !cir.double, #true}> : !rec_DefaultOperators, #cir.const_record<{#cir.int<1> : !s32i, #cir.int<1> : !u32i, #cir.fp<1{{.*}}> : !cir.float, #cir.fp<1{{.*}}> : !cir.double, #true}> : !rec_DefaultOperators]> : !cir.array +// CHECK-NEXT: cir.store{{.*}} %[[CONST]], %[[ALLOCA]] : !cir.array, !cir.ptr> // CHECK-NEXT: acc.yield // // CHECK-NEXT: } combiner { @@ -1774,38 +1190,8 @@ void acc_combined() { // CHECK: acc.reduction.recipe @reduction_lor__ZTSA5_16DefaultOperators : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["arrayinit.temp"] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr -// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> -// 
CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i -// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride %[[DECAY]], %[[LAST_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr -// CHECK-NEXT: cir.do { -// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[TEMP_LOAD]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[TEMP_LOAD]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[TEMP_LOAD]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[TEMP_LOAD]][3] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[TEMP_LOAD]][4] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #false -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_B]] : !cir.bool, !cir.ptr -// -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i -// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride %[[TEMP_LOAD]], %[[ONE]] : (!cir.ptr, !s64i) -> !cir.ptr -// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: cir.yield -// CHECK-NEXT: } while { -// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr, !cir.bool -// CHECK-NEXT: cir.condition(%[[CMP]]) -// CHECK-NEXT: } +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.zero : !cir.array +// CHECK-NEXT: cir.store{{.*}} %[[ZERO]], %[[ALLOCA]] : !cir.array, !cir.ptr> // CHECK-NEXT: acc.yield // // CHECK-NEXT: } combiner { diff --git a/clang/test/CIR/CodeGenOpenACC/combined-reduction-clause-float.cpp b/clang/test/CIR/CodeGenOpenACC/combined-reduction-clause-float.cpp index 63d69529bee53..f65cd8aa414bd 100644 --- a/clang/test/CIR/CodeGenOpenACC/combined-reduction-clause-float.cpp +++ b/clang/test/CIR/CodeGenOpenACC/combined-reduction-clause-float.cpp @@ -1,4 +1,6 @@ -// RUN: %clang_cc1 -fopenacc -triple x86_64-linux-gnu -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -triple x86_64-linux-pc %s -o - | FileCheck %s +// RUN: %clang_cc1 -fopenacc -triple x86_64-linux-gnu -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -triple x86_64-linux-pc %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + template void acc_combined() { T someVar; @@ -137,24 +139,8 @@ void acc_combined() { // CHECK-NEXT: acc.reduction.recipe @reduction_add__ZTSA5_f : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["arrayinit.temp"] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr -// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: 
%[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i -// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride %[[DECAY]], %[[LAST_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr -// CHECK-NEXT: cir.do { -// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[TEMP_LOAD]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i -// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride %[[TEMP_LOAD]], %[[ONE]] : (!cir.ptr, !s64i) -> !cir.ptr -// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: cir.yield -// CHECK-NEXT: } while { -// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr, !cir.bool -// CHECK-NEXT: cir.condition(%[[CMP]]) -// CHECK-NEXT: } +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.zero : !cir.array +// CHECK-NEXT: cir.store{{.*}} %[[ZERO]], %[[ALLOCA]] : !cir.array, !cir.ptr> // CHECK-NEXT: acc.yield // // CHECK-NEXT: } combiner { @@ -191,25 +177,8 @@ void acc_combined() { // CHECK-NEXT: acc.reduction.recipe @reduction_mul__ZTSA5_f : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float -// CHECK-NEXT: cir.store{{.*}} %[[ONE]], %[[DECAY]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[ONE_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float -// CHECK-NEXT: cir.store{{.*}} %[[ONE]], %[[NEXT_ELT]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[TWO_IDX:.*]] = cir.const #cir.int<2> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[TWO_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float -// CHECK-NEXT: cir.store{{.*}} %[[ONE]], %[[NEXT_ELT]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[THREE_IDX:.*]] = cir.const #cir.int<3> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[THREE_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float -// CHECK-NEXT: cir.store{{.*}} %[[ONE]], %[[NEXT_ELT]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[FOUR_IDX:.*]] = cir.const #cir.int<4> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[FOUR_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float -// CHECK-NEXT: cir.store{{.*}} %[[ONE]], %[[NEXT_ELT]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[CONST:.*]] = cir.const #cir.const_array<[#cir.fp<1{{.*}}> : !cir.float, #cir.fp<1{{.*}}> : !cir.float, #cir.fp<1{{.*}}> : !cir.float, #cir.fp<1{{.*}}> : !cir.float, #cir.fp<1{{.*}}> : !cir.float]> : !cir.array +// CHECK-NEXT: cir.store{{.*}} %[[CONST]], %[[ALLOCA]] : !cir.array, !cir.ptr> // CHECK-NEXT: acc.yield // // CHECK-NEXT: } combiner { @@ -246,25 +215,8 @@ void acc_combined() { // CHECK-NEXT: acc.reduction.recipe @reduction_max__ZTSA5_f : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: 
%[[ALLOCA:.*]] = cir.alloca !cir.array<!cir.float x 5>, !cir.ptr<!cir.array<!cir.float x 5>>, ["openacc.reduction.init", init]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!cir.float x 5>> -> !cir.ptr<!cir.float>
-// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-3.4{{.*}}E+38> : !cir.float
-// CHECK-NEXT: cir.store{{.*}} %[[LEAST]], %[[DECAY]] : !cir.float, !cir.ptr<!cir.float>
-// CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i
-// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[ONE_IDX]] : (!cir.ptr<!cir.float>, !s64i) -> !cir.ptr<!cir.float>
-// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-3.4{{.*}}E+38> : !cir.float
-// CHECK-NEXT: cir.store{{.*}} %[[LEAST]], %[[NEXT_ELT]] : !cir.float, !cir.ptr<!cir.float>
-// CHECK-NEXT: %[[TWO_IDX:.*]] = cir.const #cir.int<2> : !s64i
-// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[TWO_IDX]] : (!cir.ptr<!cir.float>, !s64i) -> !cir.ptr<!cir.float>
-// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-3.4{{.*}}E+38> : !cir.float
-// CHECK-NEXT: cir.store{{.*}} %[[LEAST]], %[[NEXT_ELT]] : !cir.float, !cir.ptr<!cir.float>
-// CHECK-NEXT: %[[THREE_IDX:.*]] = cir.const #cir.int<3> : !s64i
-// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[THREE_IDX]] : (!cir.ptr<!cir.float>, !s64i) -> !cir.ptr<!cir.float>
-// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-3.4{{.*}}E+38> : !cir.float
-// CHECK-NEXT: cir.store{{.*}} %[[LEAST]], %[[NEXT_ELT]] : !cir.float, !cir.ptr<!cir.float>
-// CHECK-NEXT: %[[FOUR_IDX:.*]] = cir.const #cir.int<4> : !s64i
-// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[FOUR_IDX]] : (!cir.ptr<!cir.float>, !s64i) -> !cir.ptr<!cir.float>
-// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-3.4{{.*}}E+38> : !cir.float
-// CHECK-NEXT: cir.store{{.*}} %[[LEAST]], %[[NEXT_ELT]] : !cir.float, !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[CONST:.*]] = cir.const #cir.const_array<[#cir.fp<-3.4{{.*}}E+38> : !cir.float, #cir.fp<-3.4{{.*}}E+38> : !cir.float, #cir.fp<-3.4{{.*}}E+38> : !cir.float, #cir.fp<-3.4{{.*}}E+38> : !cir.float, #cir.fp<-3.4{{.*}}E+38> : !cir.float]> : !cir.array<!cir.float x 5>
+// CHECK-NEXT: cir.store{{.*}} %[[CONST]], %[[ALLOCA]] : !cir.array<!cir.float x 5>, !cir.ptr<!cir.array<!cir.float x 5>>
 // CHECK-NEXT: acc.yield
 //
 // CHECK-NEXT: } combiner {
@@ -308,25 +260,8 @@ void acc_combined() {
 // CHECK-NEXT: acc.reduction.recipe @reduction_min__ZTSA5_f : !cir.ptr<!cir.array<!cir.float x 5>> reduction_operator <min> init {
 // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>>{{.*}})
 // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!cir.float x 5>, !cir.ptr<!cir.array<!cir.float x 5>>, ["openacc.reduction.init", init]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!cir.float x 5>> -> !cir.ptr<!cir.float>
-// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float
-// CHECK-NEXT: cir.store{{.*}} %[[LARGEST]], %[[DECAY]] : !cir.float, !cir.ptr<!cir.float>
-// CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i
-// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[ONE_IDX]] : (!cir.ptr<!cir.float>, !s64i) -> !cir.ptr<!cir.float>
-// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float
-// CHECK-NEXT: cir.store{{.*}} %[[LARGEST]], %[[NEXT_ELT]] : !cir.float, !cir.ptr<!cir.float>
-// CHECK-NEXT: %[[TWO_IDX:.*]] = cir.const #cir.int<2> : !s64i
-// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[TWO_IDX]] : (!cir.ptr<!cir.float>, !s64i) -> !cir.ptr<!cir.float>
-// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float
-// CHECK-NEXT: cir.store{{.*}} %[[LARGEST]], %[[NEXT_ELT]] : !cir.float, !cir.ptr<!cir.float>
-// CHECK-NEXT: %[[THREE_IDX:.*]] = cir.const #cir.int<3> : !s64i
-// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[THREE_IDX]] : (!cir.ptr<!cir.float>, !s64i) -> !cir.ptr<!cir.float>
-// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float
-// CHECK-NEXT:
cir.store{{.*}} %[[LARGEST]], %[[NEXT_ELT]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[FOUR_IDX:.*]] = cir.const #cir.int<4> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[FOUR_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float -// CHECK-NEXT: cir.store{{.*}} %[[LARGEST]], %[[NEXT_ELT]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[CONST:.*]] = cir.const #cir.const_array<[#cir.fp<3.4{{.*}}E+38> : !cir.float, #cir.fp<3.4{{.*}}E+38> : !cir.float, #cir.fp<3.4{{.*}}E+38> : !cir.float, #cir.fp<3.4{{.*}}E+38> : !cir.float, #cir.fp<3.4{{.*}}E+38> : !cir.float]> : !cir.array +// CHECK-NEXT: cir.store{{.*}} %[[CONST]], %[[ALLOCA]] : !cir.array, !cir.ptr> // CHECK-NEXT: acc.yield // // CHECK-NEXT: } combiner { @@ -370,25 +305,8 @@ void acc_combined() { // CHECK-NEXT: acc.reduction.recipe @reduction_land__ZTSA5_f : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float -// CHECK-NEXT: cir.store{{.*}} %[[ONE]], %[[DECAY]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[ONE_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float -// CHECK-NEXT: cir.store{{.*}} %[[ONE]], %[[NEXT_ELT]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[TWO_IDX:.*]] = cir.const #cir.int<2> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[TWO_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float -// CHECK-NEXT: cir.store{{.*}} %[[ONE]], %[[NEXT_ELT]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[THREE_IDX:.*]] = cir.const #cir.int<3> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[THREE_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float -// CHECK-NEXT: cir.store{{.*}} %[[ONE]], %[[NEXT_ELT]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[FOUR_IDX:.*]] = cir.const #cir.int<4> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[FOUR_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float -// CHECK-NEXT: cir.store{{.*}} %[[ONE]], %[[NEXT_ELT]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[CONST:.*]] = cir.const #cir.const_array<[#cir.fp<1{{.*}}> : !cir.float, #cir.fp<1{{.*}}> : !cir.float, #cir.fp<1{{.*}}> : !cir.float, #cir.fp<1{{.*}}> : !cir.float, #cir.fp<1{{.*}}> : !cir.float]> : !cir.array +// CHECK-NEXT: cir.store{{.*}} %[[CONST]], %[[ALLOCA]] : !cir.array, !cir.ptr> // CHECK-NEXT: acc.yield // // CHECK-NEXT: } combiner { @@ -435,24 +353,8 @@ void acc_combined() { // CHECK-NEXT: acc.reduction.recipe @reduction_lor__ZTSA5_f : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["arrayinit.temp"] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr -// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: 
%[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i -// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride %[[DECAY]], %[[LAST_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr -// CHECK-NEXT: cir.do { -// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[TEMP_LOAD]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i -// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride %[[TEMP_LOAD]], %[[ONE]] : (!cir.ptr, !s64i) -> !cir.ptr -// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: cir.yield -// CHECK-NEXT: } while { -// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr, !cir.bool -// CHECK-NEXT: cir.condition(%[[CMP]]) -// CHECK-NEXT: } +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.zero : !cir.array +// CHECK-NEXT: cir.store{{.*}} %[[ZERO]], %[[ALLOCA]] : !cir.array, !cir.ptr> // CHECK-NEXT: acc.yield // // CHECK-NEXT: } combiner { diff --git a/clang/test/CIR/CodeGenOpenACC/combined-reduction-clause-int.cpp b/clang/test/CIR/CodeGenOpenACC/combined-reduction-clause-int.cpp index 78b43ddc8f182..ca6f0ea60dc34 100644 --- a/clang/test/CIR/CodeGenOpenACC/combined-reduction-clause-int.cpp +++ b/clang/test/CIR/CodeGenOpenACC/combined-reduction-clause-int.cpp @@ -1,4 +1,5 @@ -// RUN: %clang_cc1 -fopenacc -triple x86_64-linux-gnu -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -triple x86_64-linux-pc %s -o - | FileCheck %s +// RUN: %clang_cc1 -fopenacc -triple x86_64-linux-gnu -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -triple x86_64-linux-pc %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s template void acc_combined() { @@ -41,7 +42,7 @@ void acc_combined() { #pragma acc parallel loop reduction(max:someVar) // CHECK-NEXT: acc.reduction.recipe @reduction_max__ZTSi : !cir.ptr reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: cir.alloca !s32i, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !s32i, !cir.ptr, ["openacc.reduction.init", init] // CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i // CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[ALLOCA]] : !s32i, !cir.ptr // CHECK-NEXT: acc.yield @@ -64,7 +65,7 @@ void acc_combined() { #pragma acc parallel loop reduction(min:someVar) // CHECK-NEXT: acc.reduction.recipe @reduction_min__ZTSi : !cir.ptr reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: cir.alloca !s32i, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !s32i, !cir.ptr, ["openacc.reduction.init", init] // CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<2147483647> : !s32i // CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[ALLOCA]] : !s32i, !cir.ptr // CHECK-NEXT: acc.yield @@ -87,7 +88,7 @@ void acc_combined() { #pragma acc parallel loop reduction(&:someVar) // CHECK-NEXT: acc.reduction.recipe @reduction_iand__ZTSi : !cir.ptr reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: cir.alloca !s32i, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !s32i, !cir.ptr, ["openacc.reduction.init", init] // CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i // CHECK-NEXT: cir.store {{.*}} 
%[[ALL_ONES]], %[[ALLOCA]] : !s32i, !cir.ptr // CHECK-NEXT: acc.yield @@ -190,24 +191,8 @@ void acc_combined() { // CHECK-NEXT: acc.reduction.recipe @reduction_add__ZTSA5_i : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["arrayinit.temp"] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr -// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i -// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride %[[DECAY]], %[[LAST_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr -// CHECK-NEXT: cir.do { -// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[TEMP_LOAD]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i -// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride %[[TEMP_LOAD]], %[[ONE]] : (!cir.ptr, !s64i) -> !cir.ptr -// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: cir.yield -// CHECK-NEXT: } while { -// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr, !cir.bool -// CHECK-NEXT: cir.condition(%[[CMP]]) -// CHECK-NEXT: } +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.zero : !cir.array +// CHECK-NEXT: cir.store{{.*}} %[[ZERO]], %[[ALLOCA]] : !cir.array, !cir.ptr> // CHECK-NEXT: acc.yield // // CHECK-NEXT: } combiner { @@ -244,25 +229,8 @@ void acc_combined() { // CHECK-NEXT: acc.reduction.recipe @reduction_mul__ZTSA5_i : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i -// CHECK-NEXT: cir.store{{.*}} %[[ONE]], %[[DECAY]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[ONE_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i -// CHECK-NEXT: cir.store{{.*}} %[[ONE]], %[[NEXT_ELT]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[TWO_IDX:.*]] = cir.const #cir.int<2> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[TWO_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i -// CHECK-NEXT: cir.store{{.*}} %[[ONE]], %[[NEXT_ELT]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[THREE_IDX:.*]] = cir.const #cir.int<3> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[THREE_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i -// CHECK-NEXT: cir.store{{.*}} %[[ONE]], %[[NEXT_ELT]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[FOUR_IDX:.*]] = cir.const #cir.int<4> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[FOUR_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i -// CHECK-NEXT: cir.store{{.*}} %[[ONE]], %[[NEXT_ELT]] : !s32i, !cir.ptr +// CHECK-NEXT: 
%[[CONST:.*]] = cir.const #cir.const_array<[#cir.int<1> : !s32i, #cir.int<1> : !s32i, #cir.int<1> : !s32i, #cir.int<1> : !s32i, #cir.int<1> : !s32i]> : !cir.array<!s32i x 5>
+// CHECK-NEXT: cir.store{{.*}} %[[CONST]], %[[ALLOCA]] : !cir.array<!s32i x 5>, !cir.ptr<!cir.array<!s32i x 5>>
 // CHECK-NEXT: acc.yield
 //
 // CHECK-NEXT: } combiner {
@@ -298,26 +266,9 @@ void acc_combined() {
 #pragma acc parallel loop reduction(max:someVarArr)
 // CHECK-NEXT: acc.reduction.recipe @reduction_max__ZTSA5_i : !cir.ptr<!cir.array<!s32i x 5>> reduction_operator <max> init {
 // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>>{{.*}})
-// CHECK-NEXT: cir.alloca !cir.array<!s32i x 5>, !cir.ptr<!cir.array<!s32i x 5>>, ["openacc.reduction.init", init]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!s32i x 5>> -> !cir.ptr<!s32i>
-// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i
-// CHECK-NEXT: cir.store{{.*}} %[[LEAST]], %[[DECAY]] : !s32i, !cir.ptr<!s32i>
-// CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i
-// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[ONE_IDX]] : (!cir.ptr<!s32i>, !s64i) -> !cir.ptr<!s32i>
-// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i
-// CHECK-NEXT: cir.store{{.*}} %[[LEAST]], %[[NEXT_ELT]] : !s32i, !cir.ptr<!s32i>
-// CHECK-NEXT: %[[TWO_IDX:.*]] = cir.const #cir.int<2> : !s64i
-// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[TWO_IDX]] : (!cir.ptr<!s32i>, !s64i) -> !cir.ptr<!s32i>
-// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i
-// CHECK-NEXT: cir.store{{.*}} %[[LEAST]], %[[NEXT_ELT]] : !s32i, !cir.ptr<!s32i>
-// CHECK-NEXT: %[[THREE_IDX:.*]] = cir.const #cir.int<3> : !s64i
-// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[THREE_IDX]] : (!cir.ptr<!s32i>, !s64i) -> !cir.ptr<!s32i>
-// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i
-// CHECK-NEXT: cir.store{{.*}} %[[LEAST]], %[[NEXT_ELT]] : !s32i, !cir.ptr<!s32i>
-// CHECK-NEXT: %[[FOUR_IDX:.*]] = cir.const #cir.int<4> : !s64i
-// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[FOUR_IDX]] : (!cir.ptr<!s32i>, !s64i) -> !cir.ptr<!s32i>
-// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i
-// CHECK-NEXT: cir.store{{.*}} %[[LEAST]], %[[NEXT_ELT]] : !s32i, !cir.ptr<!s32i>
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!s32i x 5>, !cir.ptr<!cir.array<!s32i x 5>>, ["openacc.reduction.init", init]
+// CHECK-NEXT: %[[CONST:.*]] = cir.const #cir.const_array<[#cir.int<-2147483648> : !s32i, #cir.int<-2147483648> : !s32i, #cir.int<-2147483648> : !s32i, #cir.int<-2147483648> : !s32i, #cir.int<-2147483648> : !s32i]> : !cir.array<!s32i x 5>
+// CHECK-NEXT: cir.store{{.*}} %[[CONST]], %[[ALLOCA]] : !cir.array<!s32i x 5>, !cir.ptr<!cir.array<!s32i x 5>>
 // CHECK-NEXT: acc.yield
 //
 // CHECK-NEXT: } combiner {
@@ -360,26 +311,9 @@ void acc_combined() {
 #pragma acc parallel loop reduction(min:someVarArr)
 // CHECK-NEXT: acc.reduction.recipe @reduction_min__ZTSA5_i : !cir.ptr<!cir.array<!s32i x 5>> reduction_operator <min> init {
 // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>>{{.*}})
-// CHECK-NEXT: cir.alloca !cir.array<!s32i x 5>, !cir.ptr<!cir.array<!s32i x 5>>, ["openacc.reduction.init", init]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!s32i x 5>> -> !cir.ptr<!s32i>
-// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<2147483647> : !s32i
-// CHECK-NEXT: cir.store{{.*}} %[[LARGEST]], %[[DECAY]] : !s32i, !cir.ptr<!s32i>
-// CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i
-// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[ONE_IDX]] : (!cir.ptr<!s32i>, !s64i) -> !cir.ptr<!s32i>
-// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<2147483647> : !s32i
-// CHECK-NEXT: cir.store{{.*}} %[[LARGEST]], %[[NEXT_ELT]] : !s32i, !cir.ptr<!s32i>
-// CHECK-NEXT: %[[TWO_IDX:.*]] = cir.const
#cir.int<2> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[TWO_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<2147483647> : !s32i -// CHECK-NEXT: cir.store{{.*}} %[[LARGEST]], %[[NEXT_ELT]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[THREE_IDX:.*]] = cir.const #cir.int<3> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[THREE_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<2147483647> : !s32i -// CHECK-NEXT: cir.store{{.*}} %[[LARGEST]], %[[NEXT_ELT]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[FOUR_IDX:.*]] = cir.const #cir.int<4> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[FOUR_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<2147483647> : !s32i -// CHECK-NEXT: cir.store{{.*}} %[[LARGEST]], %[[NEXT_ELT]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[CONST:.*]] = cir.const #cir.const_array<[#cir.int<2147483647> : !s32i, #cir.int<2147483647> : !s32i, #cir.int<2147483647> : !s32i, #cir.int<2147483647> : !s32i, #cir.int<2147483647> : !s32i]> : !cir.array +// CHECK-NEXT: cir.store{{.*}} %[[CONST]], %[[ALLOCA]] : !cir.array, !cir.ptr> // CHECK-NEXT: acc.yield // // CHECK-NEXT: } combiner { @@ -422,26 +356,9 @@ void acc_combined() { #pragma acc parallel loop reduction(&:someVarArr) // CHECK-NEXT: acc.reduction.recipe @reduction_iand__ZTSA5_i : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) -// CHECK-NEXT: cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i -// CHECK-NEXT: cir.store{{.*}} %[[ALL_ONES]], %[[DECAY]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[ONE_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i -// CHECK-NEXT: cir.store{{.*}} %[[ALL_ONES]], %[[NEXT_ELT]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[TWO_IDX:.*]] = cir.const #cir.int<2> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[TWO_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i -// CHECK-NEXT: cir.store{{.*}} %[[ALL_ONES]], %[[NEXT_ELT]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[THREE_IDX:.*]] = cir.const #cir.int<3> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[THREE_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i -// CHECK-NEXT: cir.store{{.*}} %[[ALL_ONES]], %[[NEXT_ELT]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[FOUR_IDX:.*]] = cir.const #cir.int<4> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[FOUR_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i -// CHECK-NEXT: cir.store{{.*}} %[[ALL_ONES]], %[[NEXT_ELT]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[CONST:.*]] = cir.const #cir.const_array<[#cir.int<-1> : !s32i, #cir.int<-1> : !s32i, #cir.int<-1> : !s32i, #cir.int<-1> : !s32i, #cir.int<-1> : !s32i]> : !cir.array +// CHECK-NEXT: cir.store{{.*}} %[[CONST]], 
%[[ALLOCA]] : !cir.array, !cir.ptr> // CHECK-NEXT: acc.yield // // CHECK-NEXT: } combiner { @@ -478,24 +395,8 @@ void acc_combined() { // CHECK-NEXT: acc.reduction.recipe @reduction_ior__ZTSA5_i : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["arrayinit.temp"] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr -// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i -// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride %[[DECAY]], %[[LAST_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr -// CHECK-NEXT: cir.do { -// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[TEMP_LOAD]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i -// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride %[[TEMP_LOAD]], %[[ONE]] : (!cir.ptr, !s64i) -> !cir.ptr -// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: cir.yield -// CHECK-NEXT: } while { -// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr, !cir.bool -// CHECK-NEXT: cir.condition(%[[CMP]]) -// CHECK-NEXT: } +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.zero : !cir.array +// CHECK-NEXT: cir.store{{.*}} %[[ZERO]], %[[ALLOCA]] : !cir.array, !cir.ptr> // CHECK-NEXT: acc.yield // // CHECK-NEXT: } combiner { @@ -532,24 +433,8 @@ void acc_combined() { // CHECK-NEXT: acc.reduction.recipe @reduction_xor__ZTSA5_i : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["arrayinit.temp"] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr -// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i -// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride %[[DECAY]], %[[LAST_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr -// CHECK-NEXT: cir.do { -// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[TEMP_LOAD]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i -// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride %[[TEMP_LOAD]], %[[ONE]] : (!cir.ptr, !s64i) -> !cir.ptr -// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: cir.yield -// CHECK-NEXT: } while { -// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr, !cir.bool -// CHECK-NEXT: cir.condition(%[[CMP]]) -// CHECK-NEXT: } +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.zero : !cir.array +// CHECK-NEXT: cir.store{{.*}} %[[ZERO]], %[[ALLOCA]] : !cir.array, !cir.ptr> // CHECK-NEXT: acc.yield // // CHECK-NEXT: } combiner { @@ -586,25 +471,8 @@ 
void acc_combined() { // CHECK-NEXT: acc.reduction.recipe @reduction_land__ZTSA5_i : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i -// CHECK-NEXT: cir.store{{.*}} %[[ONE]], %[[DECAY]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[ONE_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i -// CHECK-NEXT: cir.store{{.*}} %[[ONE]], %[[NEXT_ELT]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[TWO_IDX:.*]] = cir.const #cir.int<2> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[TWO_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i -// CHECK-NEXT: cir.store{{.*}} %[[ONE]], %[[NEXT_ELT]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[THREE_IDX:.*]] = cir.const #cir.int<3> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[THREE_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i -// CHECK-NEXT: cir.store{{.*}} %[[ONE]], %[[NEXT_ELT]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[FOUR_IDX:.*]] = cir.const #cir.int<4> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[FOUR_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i -// CHECK-NEXT: cir.store{{.*}} %[[ONE]], %[[NEXT_ELT]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[CONST:.*]] = cir.const #cir.const_array<[#cir.int<1> : !s32i, #cir.int<1> : !s32i, #cir.int<1> : !s32i, #cir.int<1> : !s32i, #cir.int<1> : !s32i]> : !cir.array +// CHECK-NEXT: cir.store{{.*}} %[[CONST]], %[[ALLOCA]] : !cir.array, !cir.ptr> // CHECK-NEXT: acc.yield // // CHECK-NEXT: } combiner { @@ -651,24 +519,8 @@ void acc_combined() { // CHECK-NEXT: acc.reduction.recipe @reduction_lor__ZTSA5_i : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["arrayinit.temp"] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr -// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i -// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride %[[DECAY]], %[[LAST_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr -// CHECK-NEXT: cir.do { -// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[TEMP_LOAD]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i -// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride %[[TEMP_LOAD]], %[[ONE]] : (!cir.ptr, !s64i) -> !cir.ptr -// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: cir.yield -// CHECK-NEXT: } while { -// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr, !cir.bool -// CHECK-NEXT: cir.condition(%[[CMP]]) -// CHECK-NEXT: 
}
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.zero : !cir.array<!s32i x 5>
+// CHECK-NEXT: cir.store{{.*}} %[[ZERO]], %[[ALLOCA]] : !cir.array<!s32i x 5>, !cir.ptr<!cir.array<!s32i x 5>>
 // CHECK-NEXT: acc.yield
 //
 // CHECK-NEXT: } combiner {
diff --git a/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-default-ops.c b/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-default-ops.c
index 6ec1c43ebbe45..cba01cab6d341 100644
--- a/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-default-ops.c
+++ b/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-default-ops.c
@@ -1,4 +1,5 @@
-// RUN: %clang_cc1 -fopenacc -triple x86_64-linux-gnu -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -std=c23 -triple x86_64-linux-pc %s -o - | FileCheck %s
+// RUN: %clang_cc1 -fopenacc -triple x86_64-linux-gnu -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -std=c23 -triple x86_64-linux-pc %s -o %t.cir
+// RUN: FileCheck --input-file=%t.cir %s

 struct DefaultOperators {
   int i;
@@ -22,22 +23,10 @@ void acc_compute() {
 #pragma acc parallel reduction(+:someVar)
 // CHECK: acc.reduction.recipe @reduction_add__ZTS16DefaultOperators : !cir.ptr<!rec_DefaultOperators> reduction_operator <add> init {
 // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_DefaultOperators>{{.*}})
-// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_DefaultOperators, !cir.ptr<!rec_DefaultOperators>, ["openacc.reduction.init", init]
-// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!s32i>
-// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i
-// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr<!s32i>
-// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!u32i>
-// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u32i
-// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_U]] : !u32i, !cir.ptr<!u32i>
-// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.float>
-// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float
-// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr<!cir.float>
-// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][3] {name = "d"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.double>
-// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double
-// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr<!cir.double>
-// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][4] {name = "b"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.bool>
-// CHECK-NEXT: %[[ZERO:.*]] = cir.const #false
-// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_B]] : !cir.bool, !cir.ptr<!cir.bool>
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_DefaultOperators, !cir.ptr<!rec_DefaultOperators>, ["openacc.reduction.init"]
+// CHECK-NEXT: %[[BITCAST:.*]] = cir.cast bitcast %[[ALLOCA]] : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!rec_anon_struct>
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.zero : !rec_anon_struct
+// CHECK-NEXT: cir.store{{.*}} %[[ZERO]], %[[BITCAST]] : !rec_anon_struct, !cir.ptr<!rec_anon_struct>
 // CHECK-NEXT: acc.yield
 //
 // CHECK-NEXT: } combiner {
@@ -81,22 +70,10 @@ void acc_compute() {
 #pragma acc parallel reduction(*:someVar)
 // CHECK-NEXT: acc.reduction.recipe @reduction_mul__ZTS16DefaultOperators : !cir.ptr<!rec_DefaultOperators> reduction_operator <mul> init {
 // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_DefaultOperators>{{.*}})
-// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_DefaultOperators, !cir.ptr<!rec_DefaultOperators>, ["openacc.reduction.init", init]
-// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!s32i>
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
-// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr<!s32i>
-// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!u32i>
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i
-// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr<!u32i>
-// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.float>
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float
-// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr<!cir.float>
-// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][3] {name = "d"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.double>
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double
-// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr<!cir.double>
-// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][4] {name = "b"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.bool>
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #true
-// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr<!cir.bool>
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_DefaultOperators, !cir.ptr<!rec_DefaultOperators>, ["openacc.reduction.init"]
+// CHECK-NEXT: %[[BITCAST:.*]] = cir.cast bitcast %[[ALLOCA]] : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!rec_anon_struct>
+// CHECK-NEXT: %[[CONST:.*]] = cir.const #cir.const_record<{#cir.int<1> : !s32i, #cir.int<1> : !u32i, #cir.fp<1{{.*}}> : !cir.float, {{.*}}, #cir.fp<1{{.*}}> : !cir.double, #true, {{.*}}}> : !rec_anon_struct
+// CHECK-NEXT: cir.store{{.*}} %[[CONST]], %[[BITCAST]] : !rec_anon_struct, !cir.ptr<!rec_anon_struct>
 // CHECK-NEXT: acc.yield
 //
 // CHECK-NEXT: } combiner {
@@ -140,22 +117,10 @@ void acc_compute() {
 #pragma acc parallel reduction(max:someVar)
 // CHECK-NEXT: acc.reduction.recipe @reduction_max__ZTS16DefaultOperators : !cir.ptr<!rec_DefaultOperators> reduction_operator <max> init {
 // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_DefaultOperators>{{.*}})
-// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_DefaultOperators, !cir.ptr<!rec_DefaultOperators>, ["openacc.reduction.init", init]
-// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!s32i>
-// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i
-// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_I]] : !s32i, !cir.ptr<!s32i>
-// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!u32i>
-// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<0> : !u32i
-// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_U]] : !u32i, !cir.ptr<!u32i>
-// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.float>
-// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-3.4{{.*}}E+38> : !cir.float
-// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_F]] : !cir.float, !cir.ptr<!cir.float>
-// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][3] {name = "d"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.double>
-// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-1.7{{.*}}E+308> : !cir.double
-// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_D]] : !cir.double, !cir.ptr<!cir.double>
-// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][4] {name = "b"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.bool>
-// CHECK-NEXT: %[[LEAST:.*]] = cir.const #false
-// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_B]] : !cir.bool, !cir.ptr<!cir.bool>
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_DefaultOperators, !cir.ptr<!rec_DefaultOperators>, ["openacc.reduction.init"]
+// CHECK-NEXT: %[[BITCAST:.*]] = cir.cast bitcast %[[ALLOCA]] : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!rec_anon_struct>
+// CHECK-NEXT: %[[CONST:.*]] = cir.const #cir.const_record<{#cir.int<-2147483648> : !s32i, #cir.int<0> : !u32i, #cir.fp<-3.4{{.*}}E+38> : !cir.float, {{.*}}, #cir.fp<-1.7{{.*}}E+308> : !cir.double, #false, {{.*}}}> : !rec_anon_struct
+// CHECK-NEXT: cir.store{{.*}} %[[CONST]], %[[BITCAST]] : !rec_anon_struct, !cir.ptr<!rec_anon_struct>
 // CHECK-NEXT: acc.yield
 //
 // CHECK-NEXT: } combiner {
@@ -241,22 +206,10 @@ void acc_compute() {
 #pragma acc parallel reduction(min:someVar)
 // CHECK-NEXT: acc.reduction.recipe @reduction_min__ZTS16DefaultOperators : !cir.ptr reduction_operator init {
 // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}})
-// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_DefaultOperators, !cir.ptr, ["openacc.reduction.init", init]
-// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<2147483647> : !s32i
-// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_I]] : !s32i, !cir.ptr
-// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<4294967295> : !u32i
-// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_U]] : !u32i, !cir.ptr
-// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float
-// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_F]] : !cir.float, !cir.ptr
-// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][3] {name = "d"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<1.7{{.*}}E+308> : !cir.double
-// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_D]] : !cir.double, !cir.ptr
-// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][4] {name = "b"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #true
-// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_B]] : !cir.bool, !cir.ptr
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_DefaultOperators, !cir.ptr, ["openacc.reduction.init"]
+// CHECK-NEXT: %[[BITCAST:.*]] = cir.cast bitcast %[[ALLOCA]] : !cir.ptr -> !cir.ptr
+// CHECK-NEXT: %[[CONST:.*]] = cir.const #cir.const_record<{#cir.int<2147483647> : !s32i, #cir.int<4294967295> : !u32i, #cir.fp<3.4{{.*}}E+38> : !cir.float, {{.*}}, #cir.fp<1.7{{.*}}E+308> : !cir.double, #true, {{.*}}}> : !rec_anon_struct
+// CHECK-NEXT: cir.store{{.*}} %[[CONST]], %[[BITCAST]] : !rec_anon_struct, !cir.ptr
 // CHECK-NEXT: acc.yield
 //
 // CHECK-NEXT: } combiner {
@@ -342,16 +295,10 @@ void acc_compute() {
 #pragma acc parallel reduction(&:someVarNoFloats)
 // CHECK-NEXT: acc.reduction.recipe @reduction_iand__ZTS24DefaultOperatorsNoFloats : !cir.ptr reduction_operator init {
 // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}})
-// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_DefaultOperatorsNoFloats, !cir.ptr, ["openacc.reduction.init", init]
-// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i
-// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_I]] : !s32i, !cir.ptr
-// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<4294967295> : !u32i
-// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_U]] : !u32i, !cir.ptr
-// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][2] {name = "b"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #true
-// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_B]] : !cir.bool, !cir.ptr
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_DefaultOperatorsNoFloats, !cir.ptr, ["openacc.reduction.init"]
+// CHECK-NEXT: %[[BITCAST:.*]] = cir.cast bitcast %[[ALLOCA]] : !cir.ptr -> !cir.ptr
+// CHECK-NEXT: %[[CONST:.*]] = cir.const #cir.const_record<{#cir.int<-1> : !s32i, #cir.int<4294967295> : !u32i, #true, {{.*}}}> : !rec_anon_struct1
+// CHECK-NEXT: cir.store{{.*}} %[[CONST]], %[[BITCAST]] : !rec_anon_struct1, !cir.ptr
 // CHECK-NEXT: acc.yield
 //
 // CHECK-NEXT: } combiner {
@@ -383,16 +330,10 @@ void acc_compute() {
 #pragma acc parallel reduction(|:someVarNoFloats)
 // CHECK-NEXT: acc.reduction.recipe @reduction_ior__ZTS24DefaultOperatorsNoFloats : !cir.ptr reduction_operator init {
 // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}})
-// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_DefaultOperatorsNoFloats, !cir.ptr, ["openacc.reduction.init", init]
-// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i
-// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr
-// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u32i
-// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_U]] : !u32i, !cir.ptr
-// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][2] {name = "b"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ZERO:.*]] = cir.const #false
-// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_B]] : !cir.bool, !cir.ptr
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_DefaultOperatorsNoFloats, !cir.ptr, ["openacc.reduction.init"]
+// CHECK-NEXT: %[[BITCAST:.*]] = cir.cast bitcast %[[ALLOCA]] : !cir.ptr -> !cir.ptr
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.zero : !rec_anon_struct1
+// CHECK-NEXT: cir.store{{.*}} %[[ZERO]], %[[BITCAST]] : !rec_anon_struct1, !cir.ptr
 // CHECK-NEXT: acc.yield
 //
 // CHECK-NEXT: } combiner {
@@ -424,16 +365,10 @@ void acc_compute() {
 #pragma acc parallel reduction(^:someVarNoFloats)
 // CHECK-NEXT: acc.reduction.recipe @reduction_xor__ZTS24DefaultOperatorsNoFloats : !cir.ptr reduction_operator init {
 // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}})
-// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_DefaultOperatorsNoFloats, !cir.ptr, ["openacc.reduction.init", init]
-// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i
-// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr
-// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u32i
-// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_U]] : !u32i, !cir.ptr
-// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][2] {name = "b"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ZERO:.*]] = cir.const #false
-// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_B]] : !cir.bool, !cir.ptr
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_DefaultOperatorsNoFloats, !cir.ptr, ["openacc.reduction.init"]
+// CHECK-NEXT: %[[BITCAST:.*]] = cir.cast bitcast %[[ALLOCA]] : !cir.ptr -> !cir.ptr
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.zero : !rec_anon_struct1
+// CHECK-NEXT: cir.store{{.*}} %[[ZERO]], %[[BITCAST]] : !rec_anon_struct1, !cir.ptr
 // CHECK-NEXT: acc.yield
 //
 // CHECK-NEXT: } combiner {
@@ -465,22 +400,10 @@ void acc_compute() {
 #pragma acc parallel reduction(&&:someVar)
 // CHECK-NEXT: acc.reduction.recipe @reduction_land__ZTS16DefaultOperators : !cir.ptr reduction_operator init {
 // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}})
-// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_DefaultOperators, !cir.ptr, ["openacc.reduction.init", init]
-// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
-// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr
-// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i
-// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr
-// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float
-// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr
-// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][3] {name = "d"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double
-// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr
-// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][4] {name = "b"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #true
-// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_DefaultOperators, !cir.ptr, ["openacc.reduction.init"]
+// CHECK-NEXT: %[[BITCAST:.*]] = cir.cast bitcast %[[ALLOCA]] : !cir.ptr -> !cir.ptr
+// CHECK-NEXT: %[[CONST:.*]] = cir.const #cir.const_record<{#cir.int<1> : !s32i, #cir.int<1> : !u32i, #cir.fp<1{{.*}}> : !cir.float, {{.*}}, #cir.fp<1{{.*}}> : !cir.double, #true, {{.*}}}> : !rec_anon_struct
+// CHECK-NEXT: cir.store{{.*}} %[[CONST]], %[[BITCAST]] : !rec_anon_struct, !cir.ptr
 // CHECK-NEXT: acc.yield
 //
 // CHECK-NEXT: } combiner {
@@ -565,22 +488,10 @@ void acc_compute() {
 #pragma acc parallel reduction(||:someVar)
 // CHECK-NEXT: acc.reduction.recipe @reduction_lor__ZTS16DefaultOperators : !cir.ptr reduction_operator init {
 // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}})
-// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_DefaultOperators, !cir.ptr, ["openacc.reduction.init", init]
-// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i
-// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr
-// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u32i
-// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_U]] : !u32i, !cir.ptr
-// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float
-// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr
-// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][3] {name = "d"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double
-// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr
-// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][4] {name = "b"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ZERO:.*]] = cir.const #false
-// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_B]] : !cir.bool, !cir.ptr
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_DefaultOperators, !cir.ptr, ["openacc.reduction.init"]
+// CHECK-NEXT: %[[BITCAST:.*]] = cir.cast bitcast %[[ALLOCA]] : !cir.ptr -> !cir.ptr
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.zero : !rec_anon_struct
+// CHECK-NEXT: cir.store{{.*}} %[[ZERO]], %[[BITCAST]] : !rec_anon_struct, !cir.ptr
 // CHECK-NEXT: acc.yield
 //
 // CHECK-NEXT: } combiner {
@@ -667,24 +578,8 @@ void acc_compute() {
 // CHECK-NEXT: acc.reduction.recipe @reduction_add__ZTSA5_16DefaultOperators : !cir.ptr> reduction_operator init {
 // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}})
 // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init]
-// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["arrayinit.temp"]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr
-// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr>
-// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
-// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride %[[DECAY]], %[[LAST_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr
-// CHECK-NEXT: cir.do {
-// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr
-// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.zero : !rec_DefaultOperators
-// CHECK-NEXT: cir.store{{.*}} %[[ZERO]], %[[TEMP_LOAD]]
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i
-// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride %[[TEMP_LOAD]], %[[ONE]] : (!cir.ptr, !s64i) -> !cir.ptr
-// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr>
-// CHECK-NEXT: cir.yield
-// CHECK-NEXT: } while {
-// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr
-// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr, !cir.bool
-// CHECK-NEXT: cir.condition(%[[CMP]])
-// CHECK-NEXT: }
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.zero : !cir.array
+// CHECK-NEXT: cir.store{{.*}} %[[ZERO]], %[[ALLOCA]] : !cir.array, !cir.ptr>
 // CHECK-NEXT: acc.yield
 //
 // CHECK-NEXT: } combiner {
@@ -752,96 +647,8 @@ void acc_compute() {
 // CHECK-NEXT: acc.reduction.recipe @reduction_mul__ZTSA5_16DefaultOperators : !cir.ptr> reduction_operator init {
 // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}})
 // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr
-// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[DECAY]][0] {name = "i"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
-// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr
-// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[DECAY]][1] {name = "u"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i
-// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr
-// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[DECAY]][2] {name = "f"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float
-// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr
-// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[DECAY]][3] {name = "d"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double
-// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr
-// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[DECAY]][4] {name = "b"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #true
-// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr
-//
-// CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i
-// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[ONE_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr
-// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
-// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr
-// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i
-// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr
-// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float
-// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr
-// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][3] {name = "d"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double
-// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr
-// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "b"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #true
-// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr
-//
-// CHECK-NEXT: %[[TWO_IDX:.*]] = cir.const #cir.int<2> : !s64i
-// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[TWO_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr
-// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
-// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr
-// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i
-// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr
-// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float
-// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr
-// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][3] {name = "d"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double
-// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr
-// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "b"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #true
-// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr
-//
-//
-// CHECK-NEXT: %[[THREE_IDX:.*]] = cir.const #cir.int<3> : !s64i
-// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[THREE_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr
-// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
-// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr
-// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i
-// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr
-// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float
-// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr
-// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][3] {name = "d"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double
-// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr
-// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "b"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #true
-// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr
-//
-// CHECK-NEXT: %[[FOUR_IDX:.*]] = cir.const #cir.int<4> : !s64i
-// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[FOUR_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr
-// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
-// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr
-// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i
-// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr
-// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float
-// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr
-// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][3] {name = "d"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double
-// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr
-// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "b"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #true
-// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr
-//
+// CHECK-NEXT: %[[CONST:.*]] = cir.const #cir.const_array<[#cir.const_record<{#cir.int<1> : !s32i, #cir.int<1> : !u32i, #cir.fp<1{{.*}}> : !cir.float, #cir.fp<1{{.*}}> : !cir.double, #true}> : !rec_DefaultOperators, #cir.const_record<{#cir.int<1> : !s32i, #cir.int<1> : !u32i, #cir.fp<1{{.*}}> : !cir.float, #cir.fp<1{{.*}}> : !cir.double, #true}> : !rec_DefaultOperators, #cir.const_record<{#cir.int<1> : !s32i, #cir.int<1> : !u32i, #cir.fp<1{{.*}}> : !cir.float, #cir.fp<1{{.*}}> : !cir.double, #true}> : !rec_DefaultOperators, #cir.const_record<{#cir.int<1> : !s32i, #cir.int<1> : !u32i, #cir.fp<1{{.*}}> : !cir.float, #cir.fp<1{{.*}}> : !cir.double, #true}> : !rec_DefaultOperators, #cir.const_record<{#cir.int<1> : !s32i, #cir.int<1> : !u32i, #cir.fp<1{{.*}}> : !cir.float, #cir.fp<1{{.*}}> : !cir.double, #true}> : !rec_DefaultOperators]> : !cir.array
+// CHECK-NEXT: cir.store{{.*}} %[[CONST]], %[[ALLOCA]] : !cir.array, !cir.ptr>
 // CHECK-NEXT: acc.yield
 //
 // CHECK-NEXT: } combiner {
@@ -910,96 +717,8 @@ void acc_compute() {
 // CHECK-NEXT: acc.reduction.recipe @reduction_max__ZTSA5_16DefaultOperators : !cir.ptr> reduction_operator init {
 // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}})
 // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr
-// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[DECAY]][0] {name = "i"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i
-// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_I]] : !s32i, !cir.ptr
-// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[DECAY]][1] {name = "u"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<0> : !u32i
-// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_U]] : !u32i, !cir.ptr
-// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[DECAY]][2] {name = "f"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-3.4{{.*}}E+38> : !cir.float
-// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_F]] : !cir.float, !cir.ptr
-// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[DECAY]][3] {name = "d"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-1.7{{.*}}E+308> : !cir.double
-// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_D]] : !cir.double, !cir.ptr
-// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[DECAY]][4] {name = "b"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[LEAST:.*]] = cir.const #false
-// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_B]] : !cir.bool, !cir.ptr
-//
-// CHECK-NEXT: %[[LEAST_IDX:.*]] = cir.const #cir.int<1> : !s64i
-// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[LEAST_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr
-// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i
-// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_I]] : !s32i, !cir.ptr
-// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<0> : !u32i
-// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_U]] : !u32i, !cir.ptr
-// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-3.4{{.*}}E+38> : !cir.float
-// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_F]] : !cir.float, !cir.ptr
-// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][3] {name = "d"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-1.7{{.*}}E+308> : !cir.double
-// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_D]] : !cir.double, !cir.ptr
-// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "b"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[LEAST:.*]] = cir.const #false
-// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_B]] : !cir.bool, !cir.ptr
-//
-// CHECK-NEXT: %[[TWO_IDX:.*]] = cir.const #cir.int<2> : !s64i
-// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[TWO_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr
-// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i
-// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_I]] : !s32i, !cir.ptr
-// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<0> : !u32i
-// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_U]] : !u32i, !cir.ptr
-// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-3.4{{.*}}E+38> : !cir.float
-// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_F]] : !cir.float, !cir.ptr
-// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][3] {name = "d"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-1.7{{.*}}E+308> : !cir.double
-// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_D]] : !cir.double, !cir.ptr
-// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "b"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[LEAST:.*]] = cir.const #false
-// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_B]] : !cir.bool, !cir.ptr
-//
-//
-// CHECK-NEXT: %[[THREE_IDX:.*]] = cir.const #cir.int<3> : !s64i
-// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[THREE_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr
-// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i
-// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_I]] : !s32i, !cir.ptr
-// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<0> : !u32i
-// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_U]] : !u32i, !cir.ptr
-// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-3.4{{.*}}E+38> : !cir.float
-// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_F]] : !cir.float, !cir.ptr
-// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][3] {name = "d"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-1.7{{.*}}E+308> : !cir.double
-// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_D]] : !cir.double, !cir.ptr
-// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "b"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[LEAST:.*]] = cir.const #false
-// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_B]] : !cir.bool, !cir.ptr
-//
-// CHECK-NEXT: %[[FOUR_IDX:.*]] = cir.const #cir.int<4> : !s64i
-// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[FOUR_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr
-// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i
-// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_I]] : !s32i, !cir.ptr
-// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<0> : !u32i
-// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_U]] : !u32i, !cir.ptr
-// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-3.4{{.*}}E+38> : !cir.float
-// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_F]] : !cir.float, !cir.ptr
-// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][3] {name = "d"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-1.7{{.*}}E+308> : !cir.double
-// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_D]] : !cir.double, !cir.ptr
-// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "b"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[LEAST:.*]] = cir.const #false
-// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_B]] : !cir.bool, !cir.ptr
-//
+// CHECK-NEXT: %[[CONST:.*]] = cir.const #cir.const_array<[#cir.const_record<{#cir.int<-2147483648> : !s32i, #cir.int<0> : !u32i, #cir.fp<-3.4{{.*}}E+38> : !cir.float, #cir.fp<-1.7{{.*}}E+308> : !cir.double, #false}> : !rec_DefaultOperators, #cir.const_record<{#cir.int<-2147483648> : !s32i, #cir.int<0> : !u32i, #cir.fp<-3.4{{.*}}E+38> : !cir.float, #cir.fp<-1.7{{.*}}E+308> : !cir.double, #false}> : !rec_DefaultOperators, #cir.const_record<{#cir.int<-2147483648> : !s32i, #cir.int<0> : !u32i, #cir.fp<-3.4{{.*}}E+38> : !cir.float, #cir.fp<-1.7{{.*}}E+308> : !cir.double, #false}> : !rec_DefaultOperators, #cir.const_record<{#cir.int<-2147483648> : !s32i, #cir.int<0> : !u32i, #cir.fp<-3.4{{.*}}E+38> : !cir.float, #cir.fp<-1.7{{.*}}E+308> : !cir.double, #false}> : !rec_DefaultOperators, #cir.const_record<{#cir.int<-2147483648> : !s32i, #cir.int<0> : !u32i, #cir.fp<-3.4{{.*}}E+38> : !cir.float, #cir.fp<-1.7{{.*}}E+308> : !cir.double, #false}> : !rec_DefaultOperators]> : !cir.array
+// CHECK-NEXT: cir.store{{.*}} %[[CONST]], %[[ALLOCA]] : !cir.array, !cir.ptr>
 // CHECK-NEXT: acc.yield
 //
 // CHECK-NEXT: } combiner {
@@ -1108,96 +827,8 @@ void acc_compute() {
 // CHECK-NEXT: acc.reduction.recipe @reduction_min__ZTSA5_16DefaultOperators : !cir.ptr> reduction_operator init {
 // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}})
 // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr
-// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[DECAY]][0] {name = "i"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<2147483647> : !s32i
-// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_I]] : !s32i, !cir.ptr
-// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[DECAY]][1] {name = "u"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<4294967295> : !u32i
-// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_U]] : !u32i, !cir.ptr
-// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[DECAY]][2] {name = "f"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float
-// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_F]] : !cir.float, !cir.ptr
-// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[DECAY]][3] {name = "d"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<1.7{{.*}}E+308> : !cir.double
-// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_D]] : !cir.double, !cir.ptr
-// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[DECAY]][4] {name = "b"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #true
-// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_B]] : !cir.bool, !cir.ptr
-//
-// CHECK-NEXT: %[[LARGEST_IDX:.*]] = cir.const #cir.int<1> : !s64i
-// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[LARGEST_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr
-// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<2147483647> : !s32i
-// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_I]] : !s32i, !cir.ptr
-// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<4294967295> : !u32i
-// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_U]] : !u32i, !cir.ptr
-// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float
-// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_F]] : !cir.float, !cir.ptr
-// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][3] {name = "d"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<1.7{{.*}}E+308> : !cir.double
-// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_D]] : !cir.double, !cir.ptr
-// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "b"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #true
-// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_B]] : !cir.bool, !cir.ptr
-//
-// CHECK-NEXT: %[[TWO_IDX:.*]] = cir.const #cir.int<2> : !s64i
-// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[TWO_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr
-// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<2147483647> : !s32i
-// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_I]] : !s32i, !cir.ptr
-// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<4294967295> : !u32i
-// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_U]] : !u32i, !cir.ptr
-// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float
-// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_F]] : !cir.float, !cir.ptr
-// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][3] {name = "d"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<1.7{{.*}}E+308> : !cir.double
-// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_D]] : !cir.double, !cir.ptr
-// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "b"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #true
-// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_B]] : !cir.bool, !cir.ptr
-//
-//
-// CHECK-NEXT: %[[THREE_IDX:.*]] = cir.const #cir.int<3> : !s64i
-// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[THREE_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr
-// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<2147483647> : !s32i
-// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_I]] : !s32i, !cir.ptr
-// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<4294967295> : !u32i
-// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_U]] : !u32i, !cir.ptr
-// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float
-// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_F]] : !cir.float, !cir.ptr
-// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][3] {name = "d"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<1.7{{.*}}E+308> : !cir.double
-// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_D]] : !cir.double, !cir.ptr
-// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "b"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #true
-// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_B]] : !cir.bool, !cir.ptr
-//
-// CHECK-NEXT: %[[FOUR_IDX:.*]] = cir.const #cir.int<4> : !s64i
-// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[FOUR_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr
-// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<2147483647> : !s32i
-// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_I]] : !s32i, !cir.ptr
-// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<4294967295> : !u32i
-// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_U]] : !u32i, !cir.ptr
-// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float
-// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_F]] : !cir.float, !cir.ptr
-// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][3] {name = "d"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<1.7{{.*}}E+308> : !cir.double
-// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_D]] : !cir.double, !cir.ptr
-// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "b"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #true
-// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_B]] : !cir.bool, !cir.ptr
-//
+// CHECK-NEXT: %[[CONST:.*]] = cir.const #cir.const_array<[#cir.const_record<{#cir.int<2147483647> : !s32i, #cir.int<4294967295> : !u32i, #cir.fp<3.4{{.*}}E+38> : !cir.float, #cir.fp<1.7{{.*}}E+308> : !cir.double, #true}> : !rec_DefaultOperators, #cir.const_record<{#cir.int<2147483647> : !s32i, #cir.int<4294967295> : !u32i, #cir.fp<3.4{{.*}}E+38> : !cir.float, #cir.fp<1.7{{.*}}E+308> : !cir.double, #true}> : !rec_DefaultOperators, #cir.const_record<{#cir.int<2147483647> : !s32i, #cir.int<4294967295> : !u32i, #cir.fp<3.4{{.*}}E+38> : !cir.float, #cir.fp<1.7{{.*}}E+308> : !cir.double, #true}> : !rec_DefaultOperators, #cir.const_record<{#cir.int<2147483647> : !s32i, #cir.int<4294967295> : !u32i, #cir.fp<3.4{{.*}}E+38> : !cir.float, #cir.fp<1.7{{.*}}E+308> : !cir.double, #true}> : !rec_DefaultOperators, #cir.const_record<{#cir.int<2147483647> : !s32i, #cir.int<4294967295> : !u32i, #cir.fp<3.4{{.*}}E+38> : !cir.float, #cir.fp<1.7{{.*}}E+308> : !cir.double, #true}> : !rec_DefaultOperators]> : !cir.array
+// CHECK-NEXT: cir.store{{.*}} %[[CONST]], %[[ALLOCA]] : !cir.array, !cir.ptr>
 // CHECK-NEXT: acc.yield
 //
 // CHECK-NEXT: } combiner {
@@ -1307,66 +938,8 @@ void acc_compute() {
 // CHECK-NEXT: acc.reduction.recipe @reduction_iand__ZTSA5_24DefaultOperatorsNoFloats : !cir.ptr> reduction_operator init {
 // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}})
 // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr
-// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[DECAY]][0] {name = "i"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i
-// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_I]] : !s32i, !cir.ptr
-// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[DECAY]][1] {name = "u"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<4294967295> : !u32i
-// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_U]] : !u32i, !cir.ptr
-// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[DECAY]][2] {name = "b"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #true
-// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_B]] : !cir.bool, !cir.ptr
-//
-// CHECK-NEXT: %[[ALL_ONES_IDX:.*]] = cir.const #cir.int<1> : !s64i
-// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[ALL_ONES_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr
-// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i
-// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_I]] : !s32i, !cir.ptr
-// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<4294967295> : !u32i
-// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_U]] : !u32i, !cir.ptr
-// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "b"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #true
-// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_B]] : !cir.bool, !cir.ptr
-//
-// CHECK-NEXT: %[[TWO_IDX:.*]] = cir.const #cir.int<2> : !s64i
-// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[TWO_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr
-// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i
-// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_I]] : !s32i, !cir.ptr
-// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<4294967295> : !u32i
-// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_U]] : !u32i, !cir.ptr
-// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "b"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #true
-// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_B]] : !cir.bool, !cir.ptr
-//
-//
-// CHECK-NEXT: %[[THREE_IDX:.*]] = cir.const #cir.int<3> : !s64i
-// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[THREE_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr
-// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i
-// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_I]] : !s32i, !cir.ptr
-// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<4294967295> : !u32i
-// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_U]] : !u32i, !cir.ptr
-// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "b"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #true
-// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_B]] : !cir.bool, !cir.ptr
-//
-// CHECK-NEXT: %[[FOUR_IDX:.*]] = cir.const #cir.int<4> : !s64i
-// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[FOUR_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr
-// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i
-// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_I]] : !s32i, !cir.ptr
-// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<4294967295> : !u32i
-// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_U]] : !u32i, !cir.ptr
-// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "b"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #true
-// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_B]] : !cir.bool, !cir.ptr
-//
+// CHECK-NEXT: %[[CONST:.*]] = cir.const #cir.const_array<[#cir.const_record<{#cir.int<-1> : !s32i, #cir.int<4294967295> : !u32i, #true}> : !rec_DefaultOperatorsNoFloats, #cir.const_record<{#cir.int<-1> : !s32i, #cir.int<4294967295> : !u32i, #true}> : !rec_DefaultOperatorsNoFloats, #cir.const_record<{#cir.int<-1> : !s32i, #cir.int<4294967295> : !u32i, #true}> : !rec_DefaultOperatorsNoFloats, #cir.const_record<{#cir.int<-1> : !s32i, #cir.int<4294967295> : !u32i, #true}> : !rec_DefaultOperatorsNoFloats, #cir.const_record<{#cir.int<-1> : !s32i, #cir.int<4294967295> : !u32i, #true}> : !rec_DefaultOperatorsNoFloats]> : !cir.array
+// CHECK-NEXT: cir.store{{.*}} %[[CONST]], %[[ALLOCA]] : !cir.array, !cir.ptr>
 // CHECK-NEXT: acc.yield
 //
 // CHECK-NEXT: } combiner {
@@ -1422,24 +995,8 @@ void acc_compute() {
 // CHECK-NEXT: acc.reduction.recipe @reduction_ior__ZTSA5_24DefaultOperatorsNoFloats : !cir.ptr> reduction_operator init {
 // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}})
 // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init]
-// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["arrayinit.temp"]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr
-// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr>
-// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
-// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride %[[DECAY]], %[[LAST_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr
-// CHECK-NEXT: cir.do {
-// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr
-// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.zero : !rec_DefaultOperatorsNoFloats
-// CHECK-NEXT: cir.store{{.*}} %[[ZERO]], %[[TEMP_LOAD]]
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i
-// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride %[[TEMP_LOAD]], %[[ONE]] : (!cir.ptr, !s64i) -> !cir.ptr
-// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr>
-// CHECK-NEXT: cir.yield
-// CHECK-NEXT: } while {
-// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr
-// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr, !cir.bool
-// CHECK-NEXT: cir.condition(%[[CMP]])
-// CHECK-NEXT: }
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.zero : !cir.array
+// CHECK-NEXT: cir.store{{.*}} %[[ZERO]], %[[ALLOCA]] : !cir.array, !cir.ptr>
 // CHECK-NEXT: acc.yield
 //
 // CHECK-NEXT: } combiner {
@@ -1495,24 +1052,8 @@ void acc_compute() {
 // CHECK-NEXT: acc.reduction.recipe @reduction_xor__ZTSA5_24DefaultOperatorsNoFloats : !cir.ptr> reduction_operator init {
 // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}})
 // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init]
-// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["arrayinit.temp"]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr
-// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr>
-// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
-// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride %[[DECAY]], %[[LAST_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr
-// CHECK-NEXT: cir.do {
-// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr
-// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.zero : !rec_DefaultOperatorsNoFloats
-// CHECK-NEXT: cir.store{{.*}} %[[ZERO]], %[[TEMP_LOAD]]
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i
-// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride %[[TEMP_LOAD]], %[[ONE]] : (!cir.ptr, !s64i) -> !cir.ptr
-// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr>
-// CHECK-NEXT: cir.yield
-// CHECK-NEXT: } while {
-// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr
-// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr, !cir.bool
-// CHECK-NEXT: cir.condition(%[[CMP]])
-// CHECK-NEXT: }
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.zero : !cir.array
+// CHECK-NEXT: cir.store{{.*}} %[[ZERO]], %[[ALLOCA]] : !cir.array, !cir.ptr>
 // CHECK-NEXT: acc.yield
 //
 // CHECK-NEXT: } combiner {
@@ -1568,96 +1109,8 @@ void acc_compute() {
 // CHECK-NEXT: acc.reduction.recipe @reduction_land__ZTSA5_16DefaultOperators : !cir.ptr> reduction_operator init {
 // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}})
 // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr
-// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[DECAY]][0] {name = "i"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
-// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr
-// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[DECAY]][1] {name = "u"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i
-// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr
-// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[DECAY]][2] {name = "f"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float
-// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr
-// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[DECAY]][3] {name = "d"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double
-// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr
-// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[DECAY]][4] {name = "b"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #true
-// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr
-//
-// CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i
-// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[ONE_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr
-// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
-// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr
-// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i
-// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr
-// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float
-// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr
-// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][3] {name = "d"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double
-// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr
-// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "b"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #true
-// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr
-//
-// CHECK-NEXT: %[[TWO_IDX:.*]] = cir.const #cir.int<2> : !s64i
-// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[TWO_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr
-// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
-// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr
-// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i
-// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr
-// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float
-// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr
-// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][3] {name = "d"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double
-// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr
-// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "b"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #true
-// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr
-//
-//
-// CHECK-NEXT: %[[THREE_IDX:.*]] = cir.const #cir.int<3> : !s64i
-// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[THREE_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr
-// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
-// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr
-// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i
-// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr
-// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float
-// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr
-// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][3] {name = "d"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double
-// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr
-// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "b"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #true
-// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr
-//
-// CHECK-NEXT: %[[FOUR_IDX:.*]] = cir.const #cir.int<4> : !s64i
-// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[FOUR_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr
-// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
-// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr
-// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i
-// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr
-// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float
-// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr
-// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][3] {name = "d"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double
-// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr
-// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "b"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #true
-// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr
-//
+// CHECK-NEXT: %[[CONST:.*]] = cir.const #cir.const_array<[#cir.const_record<{#cir.int<1> : !s32i, #cir.int<1> : !u32i, #cir.fp<1{{.*}}> : !cir.float, #cir.fp<1{{.*}}> : !cir.double, #true}> : !rec_DefaultOperators, #cir.const_record<{#cir.int<1> : !s32i, #cir.int<1> : !u32i, #cir.fp<1{{.*}}> : !cir.float, #cir.fp<1{{.*}}> : !cir.double, #true}> : !rec_DefaultOperators, #cir.const_record<{#cir.int<1> : !s32i, #cir.int<1> : !u32i, #cir.fp<1{{.*}}> : !cir.float, #cir.fp<1{{.*}}> : !cir.double, #true}> : !rec_DefaultOperators, #cir.const_record<{#cir.int<1> : !s32i, #cir.int<1> : !u32i, #cir.fp<1{{.*}}> : !cir.float, #cir.fp<1{{.*}}> : !cir.double, #true}> : !rec_DefaultOperators, #cir.const_record<{#cir.int<1> : !s32i, #cir.int<1> : !u32i, #cir.fp<1{{.*}}> : !cir.float, #cir.fp<1{{.*}}> : !cir.double, #true}> : !rec_DefaultOperators]> : !cir.array
+// CHECK-NEXT: cir.store{{.*}} %[[CONST]], %[[ALLOCA]] : !cir.array, !cir.ptr>
 // CHECK-NEXT: acc.yield
 //
 // CHECK-NEXT: } combiner {
@@ -1767,24 +1220,8 @@ void acc_compute() {
 // CHECK: acc.reduction.recipe @reduction_lor__ZTSA5_16DefaultOperators : !cir.ptr> reduction_operator init {
 // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}})
 // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init]
-// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["arrayinit.temp"]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr
-// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr>
-// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
-// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride %[[DECAY]], %[[LAST_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr
-// CHECK-NEXT: cir.do {
-// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr
-// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.zero : !rec_DefaultOperators
-// CHECK-NEXT: cir.store{{.*}} %[[ZERO]], %[[TEMP_LOAD]]
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i
-// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride %[[TEMP_LOAD]], %[[ONE]] : (!cir.ptr, !s64i) -> !cir.ptr
-// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr>
-// CHECK-NEXT: cir.yield
-// CHECK-NEXT: } while {
-// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr
-// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr, !cir.bool
-// CHECK-NEXT: cir.condition(%[[CMP]])
-// CHECK-NEXT: }
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.zero : !cir.array
+// CHECK-NEXT: cir.store{{.*}} %[[ZERO]], %[[ALLOCA]] : !cir.array, !cir.ptr>
 // CHECK-NEXT: acc.yield
 //
 // CHECK-NEXT: } combiner {
diff --git a/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-default-ops.cpp b/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-default-ops.cpp
index 7bd6f67a9e19e..43b0791250835 100644
--- a/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-default-ops.cpp
+++ b/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-default-ops.cpp
@@ -1,4 +1,5 @@
-// RUN: %clang_cc1 -fopenacc -triple x86_64-linux-gnu -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -triple x86_64-linux-pc %s -o - | FileCheck %s
+// RUN: %clang_cc1 -fopenacc -triple x86_64-linux-gnu -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -triple x86_64-linux-pc %s -o %t.cir
+// RUN: FileCheck --input-file=%t.cir %s
 
 struct DefaultOperators {
   int i;
@@ -24,21 +25,8 @@ void acc_compute() {
 // CHECK: acc.reduction.recipe @reduction_add__ZTS16DefaultOperators : !cir.ptr reduction_operator init {
 // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}})
 // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_DefaultOperators, !cir.ptr, ["openacc.reduction.init", init]
-// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i
-// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr
-// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u32i
-// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_U]] : !u32i, !cir.ptr
-// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float
-// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr
-// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][3] {name = "d"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double
-// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr
-// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][4] {name = "b"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ZERO:.*]] = cir.const #false
-// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_B]] : !cir.bool, !cir.ptr
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.zero : !rec_DefaultOperators
+// CHECK-NEXT: cir.store{{.*}} %[[ZERO]], %[[ALLOCA]] : !rec_DefaultOperators, !cir.ptr
 // CHECK-NEXT: acc.yield
 //
 // CHECK-NEXT: } combiner {
@@ -83,21 +71,8 @@ void acc_compute() {
 // CHECK-NEXT: acc.reduction.recipe @reduction_mul__ZTS16DefaultOperators : !cir.ptr reduction_operator init {
 // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}})
 // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_DefaultOperators, !cir.ptr, ["openacc.reduction.init", init]
-// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
-// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr
-// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i
-// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr
-// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float
-// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr
-// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][3] {name = "d"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double
-// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr
-// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][4] {name = "b"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #true
-// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr
+// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.const_record<{#cir.int<1> : !s32i, #cir.int<1> : !u32i, #cir.fp<1{{.*}}> : !cir.float, #cir.fp<1{{.*}}> : !cir.double, #true}> : !rec_DefaultOperators
+// CHECK-NEXT: cir.store{{.*}} %[[ONE]], %[[ALLOCA]] : !rec_DefaultOperators, !cir.ptr
 // CHECK-NEXT: acc.yield
 //
 // CHECK-NEXT: } combiner {
@@ -142,21 +117,8 @@ void acc_compute() {
 // CHECK-NEXT: acc.reduction.recipe @reduction_max__ZTS16DefaultOperators : !cir.ptr reduction_operator init {
 // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}})
 // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_DefaultOperators, !cir.ptr, ["openacc.reduction.init", init]
-// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i
-// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_I]] : !s32i, !cir.ptr
-// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<0> : !u32i
-// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_U]] : !u32i, !cir.ptr
-// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-3.4{{.*}}E+38> : !cir.float
-// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_F]] : !cir.float, !cir.ptr
-// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][3] {name = "d"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-1.7{{.*}}E+308> : !cir.double
-// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_D]] : !cir.double, !cir.ptr
-// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][4] {name = "b"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[LEAST:.*]] = cir.const #false
-// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_B]] : !cir.bool, !cir.ptr
+// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.const_record<{#cir.int<-2147483648> : !s32i, #cir.int<0> : !u32i, #cir.fp<-3.4{{.*}}E+38> : !cir.float, #cir.fp<-1.7{{.*}}E+308> : !cir.double, #false}> : !rec_DefaultOperators
+// CHECK-NEXT: cir.store{{.*}} %[[LEAST]], %[[ALLOCA]] : !rec_DefaultOperators, !cir.ptr
 // CHECK-NEXT: acc.yield
 //
 // CHECK-NEXT: } combiner {
@@ -240,21 +202,8 @@ void acc_compute() {
 // CHECK-NEXT: acc.reduction.recipe @reduction_min__ZTS16DefaultOperators : !cir.ptr reduction_operator init {
 // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}})
 // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_DefaultOperators, !cir.ptr, ["openacc.reduction.init", init]
-// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<2147483647> : !s32i
-// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_I]] : !s32i, !cir.ptr
-// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<4294967295> : !u32i
-// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_U]] : !u32i, !cir.ptr
-// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float
-// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_F]] : !cir.float, !cir.ptr
-// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][3] {name = "d"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<1.7{{.*}}E+308> : !cir.double
-// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_D]] : !cir.double, !cir.ptr
-// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][4] {name = "b"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #true
-// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_B]] : !cir.bool, !cir.ptr
+// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.const_record<{#cir.int<2147483647> : !s32i, #cir.int<4294967295> : !u32i, #cir.fp<3.4{{.*}}E+38> : !cir.float, #cir.fp<1.7{{.*}}E+308> : !cir.double, #true}> : !rec_DefaultOperators
+// CHECK-NEXT: cir.store{{.*}} %[[LARGEST]], %[[ALLOCA]] : !rec_DefaultOperators, !cir.ptr
 // CHECK-NEXT: acc.yield
 //
 // CHECK-NEXT: } combiner {
@@ -338,15 +287,8 @@ void acc_compute() {
 // CHECK-NEXT: acc.reduction.recipe @reduction_iand__ZTS24DefaultOperatorsNoFloats : !cir.ptr reduction_operator init {
 // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}})
 // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_DefaultOperatorsNoFloats, !cir.ptr, ["openacc.reduction.init", init]
-// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i
-// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_I]] : !s32i, !cir.ptr
-// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<4294967295> : !u32i
-// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_U]] : !u32i, !cir.ptr
-// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][2] {name = "b"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #true
-// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_B]] : !cir.bool, !cir.ptr
+// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.const_record<{#cir.int<-1> : !s32i, #cir.int<4294967295> : !u32i, #true}> : !rec_DefaultOperatorsNoFloats
+// CHECK-NEXT: cir.store{{.*}} %[[ALL_ONES]], %[[ALLOCA]] : !rec_DefaultOperatorsNoFloats, !cir.ptr
 // CHECK-NEXT: acc.yield
 //
 // CHECK-NEXT: } combiner {
@@ -379,15 +321,8 @@ void acc_compute() {
 // CHECK-NEXT: acc.reduction.recipe @reduction_ior__ZTS24DefaultOperatorsNoFloats : !cir.ptr reduction_operator init {
 // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}})
 // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_DefaultOperatorsNoFloats, !cir.ptr, ["openacc.reduction.init", init]
-// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i
-// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr
-// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u32i
-// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_U]] : !u32i, !cir.ptr
-// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][2] {name = "b"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ZERO:.*]] = cir.const #false
-// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_B]] : !cir.bool, !cir.ptr
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.zero : !rec_DefaultOperatorsNoFloats
+// CHECK-NEXT: cir.store{{.*}} %[[ZERO]], %[[ALLOCA]] : !rec_DefaultOperatorsNoFloats, !cir.ptr
 // CHECK-NEXT: acc.yield
 //
 // CHECK-NEXT: } combiner {
@@ -420,15 +355,8 @@ void acc_compute() {
 // CHECK-NEXT: acc.reduction.recipe @reduction_xor__ZTS24DefaultOperatorsNoFloats : !cir.ptr reduction_operator init {
 // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}})
 // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_DefaultOperatorsNoFloats, !cir.ptr, ["openacc.reduction.init", init]
-// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i
-// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr
-// CHECK-NEXT: %[[GET_U:.*]] = 
cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][2] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #false -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.zero : !rec_DefaultOperatorsNoFloats +// CHECK-NEXT: cir.store{{.*}} %[[ZERO]], %[[ALLOCA]] : !rec_DefaultOperatorsNoFloats, !cir.ptr // CHECK-NEXT: acc.yield // // CHECK-NEXT: } combiner { @@ -461,21 +389,8 @@ void acc_compute() { // CHECK-NEXT: acc.reduction.recipe @reduction_land__ZTS16DefaultOperators : !cir.ptr reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_DefaultOperators, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][3] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][4] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.const_record<{#cir.int<1> : !s32i, #cir.int<1> : !u32i, #cir.fp<1{{.*}}> : !cir.float, #cir.fp<1{{.*}}> : !cir.double, #true}> : !rec_DefaultOperators +// CHECK-NEXT: cir.store{{.*}} %[[ONE]], %[[ALLOCA]] : !rec_DefaultOperators, !cir.ptr // CHECK-NEXT: acc.yield // // CHECK-NEXT: } combiner { @@ -558,21 +473,8 @@ void acc_compute() { // CHECK-NEXT: acc.reduction.recipe @reduction_lor__ZTS16DefaultOperators : !cir.ptr reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_DefaultOperators, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member 
%[[ALLOCA]][3] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][4] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #false -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.zero : !rec_DefaultOperators +// CHECK-NEXT: cir.store{{.*}} %[[ZERO]], %[[ALLOCA]] : !rec_DefaultOperators, !cir.ptr // CHECK-NEXT: acc.yield // // CHECK-NEXT: } combiner { @@ -656,37 +558,8 @@ void acc_compute() { // CHECK-NEXT: acc.reduction.recipe @reduction_add__ZTSA5_16DefaultOperators : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["arrayinit.temp"] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr -// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i -// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride %[[DECAY]], %[[LAST_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr -// CHECK-NEXT: cir.do { -// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[TEMP_LOAD]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[TEMP_LOAD]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[TEMP_LOAD]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[TEMP_LOAD]][3] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[TEMP_LOAD]][4] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #false -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_B]] : !cir.bool, !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i -// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride %[[TEMP_LOAD]], %[[ONE]] : (!cir.ptr, !s64i) -> !cir.ptr -// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: cir.yield -// CHECK-NEXT: } while { -// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr, !cir.bool -// CHECK-NEXT: cir.condition(%[[CMP]]) -// CHECK-NEXT: } +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.zero : !cir.array +// CHECK-NEXT: cir.store{{.*}} %[[ZERO]], %[[ALLOCA]] : !cir.array, !cir.ptr> // CHECK-NEXT: acc.yield // // CHECK-NEXT: } combiner { @@ -754,96 +627,8 @@ void acc_compute() { // CHECK-NEXT: acc.reduction.recipe 
@reduction_mul__ZTSA5_16DefaultOperators : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[DECAY]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[DECAY]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[DECAY]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[DECAY]][3] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[DECAY]][4] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr -// -// CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[ONE_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][3] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr -// -// CHECK-NEXT: %[[TWO_IDX:.*]] = cir.const #cir.int<2> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[TWO_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] 
{name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][3] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr -// -// -// CHECK-NEXT: %[[THREE_IDX:.*]] = cir.const #cir.int<3> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[THREE_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][3] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr -// -// CHECK-NEXT: %[[FOUR_IDX:.*]] = cir.const #cir.int<4> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[FOUR_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][3] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr -// +// CHECK-NEXT: %[[CONST_ARR:.*]] = cir.const #cir.const_array<[#cir.const_record<{#cir.int<1> : !s32i, #cir.int<1> : !u32i, #cir.fp<1{{.*}}> : !cir.float, #cir.fp<1{{.*}}> 
: !cir.double, #true}> : !rec_DefaultOperators, #cir.const_record<{#cir.int<1> : !s32i, #cir.int<1> : !u32i, #cir.fp<1{{.*}}> : !cir.float, #cir.fp<1{{.*}}> : !cir.double, #true}> : !rec_DefaultOperators, #cir.const_record<{#cir.int<1> : !s32i, #cir.int<1> : !u32i, #cir.fp<1{{.*}}> : !cir.float, #cir.fp<1{{.*}}> : !cir.double, #true}> : !rec_DefaultOperators, #cir.const_record<{#cir.int<1> : !s32i, #cir.int<1> : !u32i, #cir.fp<1{{.*}}> : !cir.float, #cir.fp<1{{.*}}> : !cir.double, #true}> : !rec_DefaultOperators, #cir.const_record<{#cir.int<1> : !s32i, #cir.int<1> : !u32i, #cir.fp<1{{.*}}> : !cir.float, #cir.fp<1{{.*}}> : !cir.double, #true}> : !rec_DefaultOperators]> : !cir.array +// CHECK-NEXT: cir.store{{.*}} %[[CONST_ARR]], %[[ALLOCA]] : !cir.array, !cir.ptr> // CHECK-NEXT: acc.yield // // CHECK-NEXT: } combiner { @@ -912,96 +697,8 @@ void acc_compute() { // CHECK-NEXT: acc.reduction.recipe @reduction_max__ZTSA5_16DefaultOperators : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[DECAY]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[DECAY]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<0> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[DECAY]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-3.4{{.*}}E+38> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[DECAY]][3] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-1.7{{.*}}E+308> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[DECAY]][4] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #false -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_B]] : !cir.bool, !cir.ptr -// -// CHECK-NEXT: %[[LEAST_IDX:.*]] = cir.const #cir.int<1> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[LEAST_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<0> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-3.4{{.*}}E+38> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][3] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-1.7{{.*}}E+308> : !cir.double -// 
CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #false -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_B]] : !cir.bool, !cir.ptr -// -// CHECK-NEXT: %[[TWO_IDX:.*]] = cir.const #cir.int<2> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[TWO_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<0> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-3.4{{.*}}E+38> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][3] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-1.7{{.*}}E+308> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #false -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_B]] : !cir.bool, !cir.ptr -// -// -// CHECK-NEXT: %[[THREE_IDX:.*]] = cir.const #cir.int<3> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[THREE_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<0> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-3.4{{.*}}E+38> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][3] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-1.7{{.*}}E+308> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #false -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_B]] : !cir.bool, !cir.ptr -// -// CHECK-NEXT: %[[FOUR_IDX:.*]] = cir.const #cir.int<4> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[FOUR_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_I]] : !s32i, !cir.ptr -// 
CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<0> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-3.4{{.*}}E+38> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][3] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-1.7{{.*}}E+308> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #false -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_B]] : !cir.bool, !cir.ptr -// +// CHECK-NEXT: %[[CONST_ARR:.*]] = cir.const #cir.const_array<[#cir.const_record<{#cir.int<-2147483648> : !s32i, #cir.int<0> : !u32i, #cir.fp<-3.4{{.*}}E+38> : !cir.float, #cir.fp<-1.7{{.*}}E+308> : !cir.double, #false}> : !rec_DefaultOperators, #cir.const_record<{#cir.int<-2147483648> : !s32i, #cir.int<0> : !u32i, #cir.fp<-3.4{{.*}}E+38> : !cir.float, #cir.fp<-1.7{{.*}}E+308> : !cir.double, #false}> : !rec_DefaultOperators, #cir.const_record<{#cir.int<-2147483648> : !s32i, #cir.int<0> : !u32i, #cir.fp<-3.4{{.*}}E+38> : !cir.float, #cir.fp<-1.7{{.*}}E+308> : !cir.double, #false}> : !rec_DefaultOperators, #cir.const_record<{#cir.int<-2147483648> : !s32i, #cir.int<0> : !u32i, #cir.fp<-3.4{{.*}}E+38> : !cir.float, #cir.fp<-1.7{{.*}}E+308> : !cir.double, #false}> : !rec_DefaultOperators, #cir.const_record<{#cir.int<-2147483648> : !s32i, #cir.int<0> : !u32i, #cir.fp<-3.4{{.*}}E+38> : !cir.float, #cir.fp<-1.7{{.*}}E+308> : !cir.double, #false}> : !rec_DefaultOperators]> : !cir.array +// CHECK-NEXT: cir.store{{.*}} %[[CONST_ARR]], %[[ALLOCA]] : !cir.array, !cir.ptr> // CHECK-NEXT: acc.yield // // CHECK-NEXT: } combiner { @@ -1107,96 +804,8 @@ void acc_compute() { // CHECK-NEXT: acc.reduction.recipe @reduction_min__ZTSA5_16DefaultOperators : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[DECAY]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<2147483647> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[DECAY]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<4294967295> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[DECAY]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[DECAY]][3] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<1.7{{.*}}E+308> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: 
%[[GET_B:.*]] = cir.get_member %[[DECAY]][4] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_B]] : !cir.bool, !cir.ptr -// -// CHECK-NEXT: %[[LARGEST_IDX:.*]] = cir.const #cir.int<1> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[LARGEST_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<2147483647> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<4294967295> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][3] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<1.7{{.*}}E+308> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_B]] : !cir.bool, !cir.ptr -// -// CHECK-NEXT: %[[TWO_IDX:.*]] = cir.const #cir.int<2> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[TWO_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<2147483647> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<4294967295> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][3] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<1.7{{.*}}E+308> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_B]] : !cir.bool, !cir.ptr -// -// -// CHECK-NEXT: %[[THREE_IDX:.*]] = cir.const #cir.int<3> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[THREE_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<2147483647> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member 
%[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<4294967295> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][3] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<1.7{{.*}}E+308> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_B]] : !cir.bool, !cir.ptr -// -// CHECK-NEXT: %[[FOUR_IDX:.*]] = cir.const #cir.int<4> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[FOUR_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<2147483647> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<4294967295> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][3] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<1.7{{.*}}E+308> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_B]] : !cir.bool, !cir.ptr -// +// CHECK-NEXT: %[[CONST_ARR:.*]] = cir.const #cir.const_array<[#cir.const_record<{#cir.int<2147483647> : !s32i, #cir.int<4294967295> : !u32i, #cir.fp<3.4{{.*}}E+38> : !cir.float, #cir.fp<1.7{{.*}}E+308> : !cir.double, #true}> : !rec_DefaultOperators, #cir.const_record<{#cir.int<2147483647> : !s32i, #cir.int<4294967295> : !u32i, #cir.fp<3.4{{.*}}E+38> : !cir.float, #cir.fp<1.7{{.*}}E+308> : !cir.double, #true}> : !rec_DefaultOperators, #cir.const_record<{#cir.int<2147483647> : !s32i, #cir.int<4294967295> : !u32i, #cir.fp<3.4{{.*}}E+38> : !cir.float, #cir.fp<1.7{{.*}}E+308> : !cir.double, #true}> : !rec_DefaultOperators, #cir.const_record<{#cir.int<2147483647> : !s32i, #cir.int<4294967295> : !u32i, #cir.fp<3.4{{.*}}E+38> : !cir.float, #cir.fp<1.7{{.*}}E+308> : !cir.double, #true}> : !rec_DefaultOperators, #cir.const_record<{#cir.int<2147483647> : !s32i, #cir.int<4294967295> : !u32i, #cir.fp<3.4{{.*}}E+38> : !cir.float, #cir.fp<1.7{{.*}}E+308> : !cir.double, #true}> : !rec_DefaultOperators]> : !cir.array +// CHECK-NEXT: cir.store{{.*}} %[[CONST_ARR]], %[[ALLOCA]] : !cir.array, !cir.ptr> // CHECK-NEXT: acc.yield // // CHECK-NEXT: } combiner { @@ 
-1302,66 +911,8 @@ void acc_compute() { // CHECK-NEXT: acc.reduction.recipe @reduction_iand__ZTSA5_24DefaultOperatorsNoFloats : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[DECAY]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[DECAY]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<4294967295> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[DECAY]][2] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_B]] : !cir.bool, !cir.ptr -// -// CHECK-NEXT: %[[ALL_ONES_IDX:.*]] = cir.const #cir.int<1> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[ALL_ONES_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<4294967295> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_B]] : !cir.bool, !cir.ptr -// -// CHECK-NEXT: %[[TWO_IDX:.*]] = cir.const #cir.int<2> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[TWO_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<4294967295> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_B]] : !cir.bool, !cir.ptr -// -// -// CHECK-NEXT: %[[THREE_IDX:.*]] = cir.const #cir.int<3> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[THREE_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<4294967295> : !u32i -// CHECK-NEXT: 
cir.store {{.*}} %[[ALL_ONES]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_B]] : !cir.bool, !cir.ptr -// -// CHECK-NEXT: %[[FOUR_IDX:.*]] = cir.const #cir.int<4> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[FOUR_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<4294967295> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_B]] : !cir.bool, !cir.ptr -// +// CHECK-NEXT: %[[CONST_ARR:.*]] = cir.const #cir.const_array<[#cir.const_record<{#cir.int<-1> : !s32i, #cir.int<4294967295> : !u32i, #true}> : !rec_DefaultOperatorsNoFloats, #cir.const_record<{#cir.int<-1> : !s32i, #cir.int<4294967295> : !u32i, #true}> : !rec_DefaultOperatorsNoFloats, #cir.const_record<{#cir.int<-1> : !s32i, #cir.int<4294967295> : !u32i, #true}> : !rec_DefaultOperatorsNoFloats, #cir.const_record<{#cir.int<-1> : !s32i, #cir.int<4294967295> : !u32i, #true}> : !rec_DefaultOperatorsNoFloats, #cir.const_record<{#cir.int<-1> : !s32i, #cir.int<4294967295> : !u32i, #true}> : !rec_DefaultOperatorsNoFloats]> : !cir.array +// CHECK-NEXT: cir.store{{.*}} %[[CONST_ARR]], %[[ALLOCA]] : !cir.array, !cir.ptr> // CHECK-NEXT: acc.yield // // CHECK-NEXT: } combiner { @@ -1417,32 +968,8 @@ void acc_compute() { // CHECK-NEXT: acc.reduction.recipe @reduction_ior__ZTSA5_24DefaultOperatorsNoFloats : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["arrayinit.temp"] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr -// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i -// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride %[[DECAY]], %[[LAST_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr -// CHECK-NEXT: cir.do { -// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[TEMP_LOAD]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[TEMP_LOAD]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[TEMP_LOAD]][2] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #false -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_B]] : !cir.bool, !cir.ptr -// -// 
CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i -// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride %[[TEMP_LOAD]], %[[ONE]] : (!cir.ptr, !s64i) -> !cir.ptr -// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: cir.yield -// CHECK-NEXT: } while { -// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr, !cir.bool -// CHECK-NEXT: cir.condition(%[[CMP]]) -// CHECK-NEXT: } +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.zero : !cir.array +// CHECK-NEXT: cir.store{{.*}} %[[ZERO]], %[[ALLOCA]] : !cir.array, !cir.ptr> // CHECK-NEXT: acc.yield // // CHECK-NEXT: } combiner { @@ -1498,31 +1025,8 @@ void acc_compute() { // CHECK-NEXT: acc.reduction.recipe @reduction_xor__ZTSA5_24DefaultOperatorsNoFloats : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["arrayinit.temp"] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr -// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i -// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride %[[DECAY]], %[[LAST_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr -// CHECK-NEXT: cir.do { -// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[TEMP_LOAD]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[TEMP_LOAD]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[TEMP_LOAD]][2] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #false -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_B]] : !cir.bool, !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i -// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride %[[TEMP_LOAD]], %[[ONE]] : (!cir.ptr, !s64i) -> !cir.ptr -// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: cir.yield -// CHECK-NEXT: } while { -// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr, !cir.bool -// CHECK-NEXT: cir.condition(%[[CMP]]) -// CHECK-NEXT: } +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.zero : !cir.array +// CHECK-NEXT: cir.store{{.*}} %[[ZERO]], %[[ALLOCA]] : !cir.array, !cir.ptr> // CHECK-NEXT: acc.yield // // CHECK-NEXT: } combiner { @@ -1578,96 +1082,8 @@ void acc_compute() { // CHECK-NEXT: acc.reduction.recipe @reduction_land__ZTSA5_16DefaultOperators : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[DECAY]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// 
CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[DECAY]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[DECAY]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[DECAY]][3] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[DECAY]][4] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr -// -// CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[ONE_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][3] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr -// -// CHECK-NEXT: %[[TWO_IDX:.*]] = cir.const #cir.int<2> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[TWO_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][3] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, 
!cir.ptr
-// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "b"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #true
-// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr
-//
-//
-// CHECK-NEXT: %[[THREE_IDX:.*]] = cir.const #cir.int<3> : !s64i
-// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[THREE_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr
-// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
-// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr
-// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i
-// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr
-// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float
-// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr
-// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][3] {name = "d"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double
-// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr
-// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "b"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #true
-// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr
-//
-// CHECK-NEXT: %[[FOUR_IDX:.*]] = cir.const #cir.int<4> : !s64i
-// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[FOUR_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr
-// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
-// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr
-// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i
-// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr
-// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float
-// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr
-// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][3] {name = "d"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double
-// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr
-// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "b"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #true
-// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr
-//
+// CHECK-NEXT: %[[CONST_ARR:.*]] = cir.const #cir.const_array<[#cir.const_record<{#cir.int<1> : !s32i, #cir.int<1> : !u32i, #cir.fp<1{{.*}}> : !cir.float, #cir.fp<1{{.*}}> : !cir.double, #true}> : !rec_DefaultOperators, #cir.const_record<{#cir.int<1> : !s32i, #cir.int<1> : !u32i, #cir.fp<1{{.*}}> : !cir.float, #cir.fp<1{{.*}}> : !cir.double, #true}> : !rec_DefaultOperators, #cir.const_record<{#cir.int<1> : !s32i, #cir.int<1> : !u32i, #cir.fp<1{{.*}}> : !cir.float, #cir.fp<1{{.*}}> : !cir.double, #true}> : !rec_DefaultOperators, #cir.const_record<{#cir.int<1> : !s32i, #cir.int<1> : !u32i, #cir.fp<1{{.*}}> : !cir.float, #cir.fp<1{{.*}}> : !cir.double, #true}> : !rec_DefaultOperators, #cir.const_record<{#cir.int<1> : !s32i, #cir.int<1> : !u32i, #cir.fp<1{{.*}}> : !cir.float, #cir.fp<1{{.*}}> : !cir.double, #true}> : !rec_DefaultOperators]> : !cir.array
+// CHECK-NEXT: cir.store{{.*}} %[[CONST_ARR]], %[[ALLOCA]] : !cir.array, !cir.ptr>
 // CHECK-NEXT: acc.yield
 //
 // CHECK-NEXT: } combiner {
@@ -1774,38 +1190,8 @@ void acc_compute() {
 // CHECK: acc.reduction.recipe @reduction_lor__ZTSA5_16DefaultOperators : !cir.ptr> reduction_operator init {
 // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}})
 // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init]
-// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["arrayinit.temp"]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr
-// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr>
-// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
-// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride %[[DECAY]], %[[LAST_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr
-// CHECK-NEXT: cir.do {
-// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr
-// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[TEMP_LOAD]][0] {name = "i"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i
-// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr
-// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[TEMP_LOAD]][1] {name = "u"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u32i
-// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_U]] : !u32i, !cir.ptr
-// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[TEMP_LOAD]][2] {name = "f"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float
-// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr
-// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[TEMP_LOAD]][3] {name = "d"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double
-// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr
-// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[TEMP_LOAD]][4] {name = "b"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ZERO:.*]] = cir.const #false
-// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_B]] : !cir.bool, !cir.ptr
-//
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i
-// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride %[[TEMP_LOAD]], %[[ONE]] : (!cir.ptr, !s64i) -> !cir.ptr
-// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr>
-// CHECK-NEXT: cir.yield
-// CHECK-NEXT: } while {
-// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr
-// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr, !cir.bool
-// CHECK-NEXT: cir.condition(%[[CMP]])
-// CHECK-NEXT: }
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.zero : !cir.array
+// CHECK-NEXT: cir.store{{.*}} %[[ZERO]], %[[ALLOCA]] : !cir.array, !cir.ptr>
 // CHECK-NEXT: acc.yield
 //
 // CHECK-NEXT: } combiner {
diff --git a/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-float.c b/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-float.c
index 13c335b867044..cd4d2dcb9fa4b 100644
--- a/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-float.c
+++ b/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-float.c
@@ -1,4 +1,5 @@
-// RUN: %clang_cc1 -fopenacc -triple x86_64-linux-gnu -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -triple x86_64-linux-pc %s -o - | FileCheck %s
+// RUN: %clang_cc1 -fopenacc -triple x86_64-linux-gnu -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -triple x86_64-linux-pc %s -o %t.cir
+// RUN: FileCheck --input-file=%t.cir %s
 void acc_compute() {
 float someVar;
@@ -139,24 +140,8 @@ void acc_compute() {
 // CHECK-NEXT: acc.reduction.recipe @reduction_add__ZTSA5_f : !cir.ptr> reduction_operator init {
 // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}})
 // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init]
-// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["arrayinit.temp"]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr
-// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr>
-// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
-// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride %[[DECAY]], %[[LAST_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr
-// CHECK-NEXT: cir.do {
-// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr
-// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float
-// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[TEMP_LOAD]] : !cir.float, !cir.ptr
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i
-// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride %[[TEMP_LOAD]], %[[ONE]] : (!cir.ptr, !s64i) -> !cir.ptr
-// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr>
-// CHECK-NEXT: cir.yield
-// CHECK-NEXT: } while {
-// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr
-// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr, !cir.bool
-// CHECK-NEXT: cir.condition(%[[CMP]])
-// CHECK-NEXT: }
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.zero : !cir.array
+// CHECK-NEXT: cir.store{{.*}} %[[ZERO]], %[[ALLOCA]] : !cir.array, !cir.ptr>
 // CHECK-NEXT: acc.yield
 //
 // CHECK-NEXT: } combiner {
@@ -193,25 +178,8 @@ void acc_compute() {
 // CHECK-NEXT: acc.reduction.recipe @reduction_mul__ZTSA5_f : !cir.ptr> reduction_operator init {
 // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}})
 // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float
-// CHECK-NEXT: cir.store{{.*}} %[[ONE]], %[[DECAY]] : !cir.float, !cir.ptr
-// CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i
-// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[ONE_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float
-// CHECK-NEXT: cir.store{{.*}} %[[ONE]], %[[NEXT_ELT]] : !cir.float, !cir.ptr
-// CHECK-NEXT: %[[TWO_IDX:.*]] = cir.const #cir.int<2> : !s64i
-// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[TWO_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float
-// CHECK-NEXT: cir.store{{.*}} %[[ONE]], %[[NEXT_ELT]] : !cir.float, !cir.ptr
-// CHECK-NEXT: %[[THREE_IDX:.*]] = cir.const #cir.int<3> : !s64i
-// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[THREE_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float
-// CHECK-NEXT: cir.store{{.*}} %[[ONE]], %[[NEXT_ELT]] : !cir.float, !cir.ptr
-// CHECK-NEXT: %[[FOUR_IDX:.*]] = cir.const #cir.int<4> : !s64i
-// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[FOUR_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float
-// CHECK-NEXT: cir.store{{.*}} %[[ONE]], %[[NEXT_ELT]] : !cir.float, !cir.ptr
+// CHECK-NEXT: %[[CONST_ARRAY:.*]] = cir.const #cir.const_array<[#cir.fp<1{{.*}}> : !cir.float, #cir.fp<1{{.*}}> : !cir.float, #cir.fp<1{{.*}}> : !cir.float, #cir.fp<1{{.*}}> : !cir.float, #cir.fp<1{{.*}}> : !cir.float]> : !cir.array
+// CHECK-NEXT: cir.store{{.*}} %[[CONST_ARRAY]], %[[ALLOCA]] : !cir.array, !cir.ptr>
 // CHECK-NEXT: acc.yield
 //
 // CHECK-NEXT: } combiner {
@@ -248,25 +216,8 @@ void acc_compute() {
 // CHECK-NEXT: acc.reduction.recipe @reduction_max__ZTSA5_f : !cir.ptr> reduction_operator init {
 // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}})
 // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr
-// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-3.4{{.*}}E+38> : !cir.float
-// CHECK-NEXT: cir.store{{.*}} %[[LEAST]], %[[DECAY]] : !cir.float, !cir.ptr
-// CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i
-// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[ONE_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr
-// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-3.4{{.*}}E+38> : !cir.float
-// CHECK-NEXT: cir.store{{.*}} %[[LEAST]], %[[NEXT_ELT]] : !cir.float, !cir.ptr
-// CHECK-NEXT: %[[TWO_IDX:.*]] = cir.const #cir.int<2> : !s64i
-// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[TWO_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr
-// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-3.4{{.*}}E+38> : !cir.float
-// CHECK-NEXT: cir.store{{.*}} %[[LEAST]], %[[NEXT_ELT]] : !cir.float, !cir.ptr
-// CHECK-NEXT: %[[THREE_IDX:.*]] = cir.const #cir.int<3> : !s64i
-// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[THREE_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr
-// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-3.4{{.*}}E+38> : !cir.float
-// CHECK-NEXT: cir.store{{.*}} %[[LEAST]], %[[NEXT_ELT]] : !cir.float, !cir.ptr
-// CHECK-NEXT: %[[FOUR_IDX:.*]] = cir.const #cir.int<4> : !s64i
-// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[FOUR_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr
-// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-3.4{{.*}}E+38> : !cir.float
-// CHECK-NEXT: cir.store{{.*}} %[[LEAST]], %[[NEXT_ELT]] : !cir.float, !cir.ptr
+// CHECK-NEXT: %[[CONST_ARRAY:.*]] = cir.const #cir.const_array<[#cir.fp<-3.4{{.*}}E+38> : !cir.float, #cir.fp<-3.4{{.*}}E+38> : !cir.float, #cir.fp<-3.4{{.*}}E+38> : !cir.float, #cir.fp<-3.4{{.*}}E+38> : !cir.float, #cir.fp<-3.4{{.*}}E+38> : !cir.float]> : !cir.array
+// CHECK-NEXT: cir.store{{.*}} %[[CONST_ARRAY]], %[[ALLOCA]] : !cir.array, !cir.ptr>
 // CHECK-NEXT: acc.yield
 //
 // CHECK-NEXT: } combiner {
@@ -310,25 +261,8 @@ void acc_compute() {
 // CHECK-NEXT: acc.reduction.recipe @reduction_min__ZTSA5_f : !cir.ptr> reduction_operator init {
 // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}})
 // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr
-// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float
-// CHECK-NEXT: cir.store{{.*}} %[[LARGEST]], %[[DECAY]] : !cir.float, !cir.ptr
-// CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i
-// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[ONE_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr
-// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float
-// CHECK-NEXT: cir.store{{.*}} %[[LARGEST]], %[[NEXT_ELT]] : !cir.float, !cir.ptr
-// CHECK-NEXT: %[[TWO_IDX:.*]] = cir.const #cir.int<2> : !s64i
-// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[TWO_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr
-// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float
-// CHECK-NEXT: cir.store{{.*}} %[[LARGEST]], %[[NEXT_ELT]] : !cir.float, !cir.ptr
-// CHECK-NEXT: %[[THREE_IDX:.*]] = cir.const #cir.int<3> : !s64i
-// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[THREE_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr
-// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float
-// CHECK-NEXT: cir.store{{.*}} %[[LARGEST]], %[[NEXT_ELT]] : !cir.float, !cir.ptr
-// CHECK-NEXT: %[[FOUR_IDX:.*]] = cir.const #cir.int<4> : !s64i
-// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[FOUR_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr
-// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float
-// CHECK-NEXT: cir.store{{.*}} %[[LARGEST]], %[[NEXT_ELT]] : !cir.float, !cir.ptr
+// CHECK-NEXT: %[[CONST_ARRAY:.*]] = cir.const #cir.const_array<[#cir.fp<3.4{{.*}}E+38> : !cir.float, #cir.fp<3.4{{.*}}E+38> : !cir.float, #cir.fp<3.4{{.*}}E+38> : !cir.float, #cir.fp<3.4{{.*}}E+38> : !cir.float, #cir.fp<3.4{{.*}}E+38> : !cir.float]> : !cir.array
+// CHECK-NEXT: cir.store{{.*}} %[[CONST_ARRAY]], %[[ALLOCA]] : !cir.array, !cir.ptr>
 // CHECK-NEXT: acc.yield
 //
 // CHECK-NEXT: } combiner {
@@ -372,25 +306,8 @@ void acc_compute() {
 // CHECK-NEXT: acc.reduction.recipe @reduction_land__ZTSA5_f : !cir.ptr> reduction_operator init {
 // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}})
 // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float
-// CHECK-NEXT: cir.store{{.*}} %[[ONE]], %[[DECAY]] : !cir.float, !cir.ptr
-// CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i
-// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[ONE_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float
-// CHECK-NEXT: cir.store{{.*}} %[[ONE]], %[[NEXT_ELT]] : !cir.float, !cir.ptr
-// CHECK-NEXT: %[[TWO_IDX:.*]] = cir.const #cir.int<2> : !s64i
-// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[TWO_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float
-// CHECK-NEXT: cir.store{{.*}} %[[ONE]], %[[NEXT_ELT]] : !cir.float, !cir.ptr
-// CHECK-NEXT: %[[THREE_IDX:.*]] = cir.const #cir.int<3> : !s64i
-// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[THREE_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float
-// CHECK-NEXT: cir.store{{.*}} %[[ONE]], %[[NEXT_ELT]] : !cir.float, !cir.ptr
-// CHECK-NEXT: %[[FOUR_IDX:.*]] = cir.const #cir.int<4> : !s64i
-// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[FOUR_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float
-// CHECK-NEXT: cir.store{{.*}} %[[ONE]], %[[NEXT_ELT]] : !cir.float, !cir.ptr
+// CHECK-NEXT: %[[CONST_ARRAY:.*]] = cir.const #cir.const_array<[#cir.fp<1{{.*}}> : !cir.float, #cir.fp<1{{.*}}> : !cir.float, #cir.fp<1{{.*}}> : !cir.float, #cir.fp<1{{.*}}> : !cir.float, #cir.fp<1{{.*}}> : !cir.float]> : !cir.array
+// CHECK-NEXT: cir.store{{.*}} %[[CONST_ARRAY]], %[[ALLOCA]] : !cir.array, !cir.ptr>
 // CHECK-NEXT: acc.yield
 //
 // CHECK-NEXT: } combiner {
@@ -438,24 +355,8 @@ void acc_compute() {
 // CHECK: acc.reduction.recipe @reduction_lor__ZTSA5_f : !cir.ptr> reduction_operator init {
 // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}})
 // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init]
-// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["arrayinit.temp"]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr
-// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr>
-// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
-// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride %[[DECAY]], %[[LAST_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr
-// CHECK-NEXT: cir.do {
-// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr
-// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float
-// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[TEMP_LOAD]] : !cir.float, !cir.ptr
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i
-// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride %[[TEMP_LOAD]], %[[ONE]] : (!cir.ptr, !s64i) -> !cir.ptr
-// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr>
-// CHECK-NEXT: cir.yield
-// CHECK-NEXT: } while {
-// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr
-// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr, !cir.bool
-// CHECK-NEXT: cir.condition(%[[CMP]])
-// CHECK-NEXT: }
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.zero : !cir.array
+// CHECK-NEXT: cir.store{{.*}} %[[ZERO]], %[[ALLOCA]] : !cir.array, !cir.ptr>
 // CHECK-NEXT: acc.yield
 //
 // CHECK-NEXT: } combiner {
diff --git a/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-float.cpp b/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-float.cpp
index 67378210ba83c..c1385ab830f00 100644
--- a/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-float.cpp
+++ b/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-float.cpp
@@ -1,4 +1,5 @@
-// RUN: %clang_cc1 -fopenacc -triple x86_64-linux-gnu -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -triple x86_64-linux-pc %s -o - | FileCheck %s
+// RUN: %clang_cc1 -fopenacc -triple x86_64-linux-gnu -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -triple x86_64-linux-pc %s -o %t.cir
+// RUN: FileCheck --input-file=%t.cir %s
 template
 void acc_compute() {
@@ -138,24 +139,8 @@ void acc_compute() {
 // CHECK-NEXT: acc.reduction.recipe @reduction_add__ZTSA5_f : !cir.ptr> reduction_operator init {
 // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}})
 // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init]
-// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["arrayinit.temp"]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr
-// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr>
-// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
-// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride %[[DECAY]], %[[LAST_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr
-// CHECK-NEXT: cir.do {
-// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr
-// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float
-// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[TEMP_LOAD]] : !cir.float, !cir.ptr
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i
-// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride %[[TEMP_LOAD]], %[[ONE]] : (!cir.ptr, !s64i) -> !cir.ptr
-// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr>
-// CHECK-NEXT: cir.yield
-// CHECK-NEXT: } while {
-// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr
-// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr, !cir.bool
-// CHECK-NEXT: cir.condition(%[[CMP]])
-// CHECK-NEXT: }
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.zero : !cir.array
+// CHECK-NEXT: cir.store{{.*}} %[[ZERO]], %[[ALLOCA]] : !cir.array, !cir.ptr>
 // CHECK-NEXT: acc.yield
 //
 // CHECK-NEXT: } combiner {
@@ -192,25 +177,8 @@ void acc_compute() {
 // CHECK-NEXT: acc.reduction.recipe @reduction_mul__ZTSA5_f : !cir.ptr> reduction_operator init {
 // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}})
 // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float
-// CHECK-NEXT: cir.store{{.*}} %[[ONE]], %[[DECAY]] : !cir.float, !cir.ptr
-// CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i
-// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[ONE_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float
-// CHECK-NEXT: cir.store{{.*}} %[[ONE]], %[[NEXT_ELT]] : !cir.float, !cir.ptr
-// CHECK-NEXT: %[[TWO_IDX:.*]] = cir.const #cir.int<2> : !s64i
-// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[TWO_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float
-// CHECK-NEXT: cir.store{{.*}} %[[ONE]], %[[NEXT_ELT]] : !cir.float, !cir.ptr
-// CHECK-NEXT: %[[THREE_IDX:.*]] = cir.const #cir.int<3> : !s64i
-// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[THREE_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float
-// CHECK-NEXT: cir.store{{.*}} %[[ONE]], %[[NEXT_ELT]] : !cir.float, !cir.ptr
-// CHECK-NEXT: %[[FOUR_IDX:.*]] = cir.const #cir.int<4> : !s64i
-// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[FOUR_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float
-// CHECK-NEXT: cir.store{{.*}} %[[ONE]], %[[NEXT_ELT]] : !cir.float, !cir.ptr
+// CHECK-NEXT: %[[CONST_ARRAY:.*]] = cir.const #cir.const_array<[#cir.fp<1{{.*}}> : !cir.float, #cir.fp<1{{.*}}> : !cir.float, #cir.fp<1{{.*}}> : !cir.float, #cir.fp<1{{.*}}> : !cir.float, #cir.fp<1{{.*}}> : !cir.float]> : !cir.array
+// CHECK-NEXT: cir.store{{.*}} %[[CONST_ARRAY]], %[[ALLOCA]] : !cir.array, !cir.ptr>
 // CHECK-NEXT: acc.yield
 //
 // CHECK-NEXT: } combiner {
@@ -247,25 +215,8 @@ void acc_compute() {
 // CHECK-NEXT: acc.reduction.recipe @reduction_max__ZTSA5_f : !cir.ptr> reduction_operator init {
 // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}})
 // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr
-// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-3.4{{.*}}E+38> : !cir.float
-// CHECK-NEXT: cir.store{{.*}} %[[LEAST]], %[[DECAY]] : !cir.float, !cir.ptr
-// CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i
-// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[ONE_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr
-// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-3.4{{.*}}E+38> : !cir.float
-// CHECK-NEXT: cir.store{{.*}} %[[LEAST]], %[[NEXT_ELT]] : !cir.float, !cir.ptr
-// CHECK-NEXT: %[[TWO_IDX:.*]] = cir.const #cir.int<2> : !s64i
-// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[TWO_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr
-// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-3.4{{.*}}E+38> : !cir.float
-// CHECK-NEXT: cir.store{{.*}} %[[LEAST]], %[[NEXT_ELT]] : !cir.float, !cir.ptr
-// CHECK-NEXT: %[[THREE_IDX:.*]] = cir.const #cir.int<3> : !s64i
-// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[THREE_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr
-// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-3.4{{.*}}E+38> : !cir.float
-// CHECK-NEXT: cir.store{{.*}} %[[LEAST]], %[[NEXT_ELT]] : !cir.float, !cir.ptr
-// CHECK-NEXT: %[[FOUR_IDX:.*]] = cir.const #cir.int<4> : !s64i
-// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[FOUR_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr
-// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-3.4{{.*}}E+38> : !cir.float
-// CHECK-NEXT: cir.store{{.*}} %[[LEAST]], %[[NEXT_ELT]] : !cir.float, !cir.ptr
+// CHECK-NEXT: %[[CONST_ARRAY:.*]] = cir.const #cir.const_array<[#cir.fp<-3.4{{.*}}E+38> : !cir.float, #cir.fp<-3.4{{.*}}E+38> : !cir.float, #cir.fp<-3.4{{.*}}E+38> : !cir.float, #cir.fp<-3.4{{.*}}E+38> : !cir.float, #cir.fp<-3.4{{.*}}E+38> : !cir.float]> : !cir.array
+// CHECK-NEXT: cir.store{{.*}} %[[CONST_ARRAY]], %[[ALLOCA]] : !cir.array, !cir.ptr>
 // CHECK-NEXT: acc.yield
 //
 // CHECK-NEXT: } combiner {
@@ -309,25 +260,8 @@ void acc_compute() {
 // CHECK-NEXT: acc.reduction.recipe @reduction_min__ZTSA5_f : !cir.ptr> reduction_operator init {
 // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}})
 // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr
-// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float
-// CHECK-NEXT: cir.store{{.*}} %[[LARGEST]], %[[DECAY]] : !cir.float, !cir.ptr
-// CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i
-// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[ONE_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr
-// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float
-// CHECK-NEXT: cir.store{{.*}} %[[LARGEST]], %[[NEXT_ELT]] : !cir.float, !cir.ptr
-// CHECK-NEXT: %[[TWO_IDX:.*]] = cir.const #cir.int<2> : !s64i
-// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[TWO_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr
-// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float
-// CHECK-NEXT: cir.store{{.*}} %[[LARGEST]], %[[NEXT_ELT]] : !cir.float, !cir.ptr
-// CHECK-NEXT: %[[THREE_IDX:.*]] = cir.const #cir.int<3> : !s64i
-// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[THREE_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr
-// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float
-// CHECK-NEXT: cir.store{{.*}} %[[LARGEST]], %[[NEXT_ELT]] : !cir.float, !cir.ptr
-// CHECK-NEXT: %[[FOUR_IDX:.*]] = cir.const #cir.int<4> : !s64i
-// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[FOUR_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr
-// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float
-// CHECK-NEXT: cir.store{{.*}} %[[LARGEST]], %[[NEXT_ELT]] : !cir.float, !cir.ptr
+// CHECK-NEXT: %[[CONST_ARRAY:.*]] = cir.const #cir.const_array<[#cir.fp<3.4{{.*}}E+38> : !cir.float, #cir.fp<3.4{{.*}}E+38> : !cir.float, #cir.fp<3.4{{.*}}E+38> : !cir.float, #cir.fp<3.4{{.*}}E+38> : !cir.float, #cir.fp<3.4{{.*}}E+38> : !cir.float]> : !cir.array
+// CHECK-NEXT: cir.store{{.*}} %[[CONST_ARRAY]], %[[ALLOCA]] : !cir.array, !cir.ptr>
 // CHECK-NEXT: acc.yield
 //
 // CHECK-NEXT: } combiner {
@@ -371,25 +305,8 @@ void acc_compute() {
 // CHECK-NEXT: acc.reduction.recipe @reduction_land__ZTSA5_f : !cir.ptr> reduction_operator init {
 // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}})
 // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float
-// CHECK-NEXT: cir.store{{.*}} %[[ONE]], %[[DECAY]] : !cir.float, !cir.ptr
-// CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i
-// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[ONE_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float
-// CHECK-NEXT: cir.store{{.*}} %[[ONE]], %[[NEXT_ELT]] : !cir.float, !cir.ptr
-// CHECK-NEXT: %[[TWO_IDX:.*]] = cir.const #cir.int<2> : !s64i
-// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[TWO_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float
-// CHECK-NEXT: cir.store{{.*}} %[[ONE]], %[[NEXT_ELT]] : !cir.float, !cir.ptr
-// CHECK-NEXT: %[[THREE_IDX:.*]] = cir.const #cir.int<3> : !s64i
-// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[THREE_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float
-// CHECK-NEXT: cir.store{{.*}} %[[ONE]], %[[NEXT_ELT]] : !cir.float, !cir.ptr
-// CHECK-NEXT: %[[FOUR_IDX:.*]] = cir.const #cir.int<4> : !s64i
-// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[FOUR_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float
-// CHECK-NEXT: cir.store{{.*}} %[[ONE]], %[[NEXT_ELT]] : !cir.float, !cir.ptr
+// CHECK-NEXT: %[[CONST_ARRAY:.*]] = cir.const #cir.const_array<[#cir.fp<1{{.*}}> : !cir.float, #cir.fp<1{{.*}}> : !cir.float, #cir.fp<1{{.*}}> : !cir.float, #cir.fp<1{{.*}}> : !cir.float, #cir.fp<1{{.*}}> : !cir.float]> : !cir.array
+// CHECK-NEXT: cir.store{{.*}} %[[CONST_ARRAY]], %[[ALLOCA]] : !cir.array, !cir.ptr>
 // CHECK-NEXT: acc.yield
 //
 // CHECK-NEXT: } combiner {
@@ -436,24 +353,8 @@ void acc_compute() {
 // CHECK-NEXT: acc.reduction.recipe @reduction_lor__ZTSA5_f : !cir.ptr> reduction_operator init {
 // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}})
 // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init]
-// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["arrayinit.temp"]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr
-// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr>
-// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
-// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride %[[DECAY]], %[[LAST_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr
-// CHECK-NEXT: cir.do {
-// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr
-// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float
-// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[TEMP_LOAD]] : !cir.float, !cir.ptr
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i
-// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride %[[TEMP_LOAD]], %[[ONE]] : (!cir.ptr, !s64i) -> !cir.ptr
-// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr>
-// CHECK-NEXT: cir.yield
-// CHECK-NEXT: } while {
-// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr
-// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr, !cir.bool
-// CHECK-NEXT: cir.condition(%[[CMP]])
-// CHECK-NEXT: }
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.zero : !cir.array
+// CHECK-NEXT: cir.store{{.*}} %[[ZERO]], %[[ALLOCA]] : !cir.array, !cir.ptr>
 // CHECK-NEXT: acc.yield
 //
 // CHECK-NEXT: } combiner {
diff --git a/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-int.c b/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-int.c
index be7b12350360d..440f8f9f8fbf7 100644
--- a/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-int.c
+++ b/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-int.c
@@ -1,4 +1,5 @@
-// RUN: %clang_cc1 -fopenacc -triple x86_64-linux-gnu -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -triple x86_64-linux-pc %s -o - | FileCheck %s
+// RUN: %clang_cc1 -fopenacc -triple x86_64-linux-gnu -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -triple x86_64-linux-pc %s -o %t.cir
+// RUN: FileCheck --input-file=%t.cir %s
 void acc_compute() {
 int someVar;
@@ -40,7 +41,7 @@ void acc_compute() {
 #pragma acc parallel reduction(max:someVar)
 // CHECK-NEXT: acc.reduction.recipe @reduction_max__ZTSi : !cir.ptr reduction_operator init {
 // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}})
-// CHECK-NEXT: cir.alloca !s32i, !cir.ptr, ["openacc.reduction.init", init]
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !s32i, !cir.ptr, ["openacc.reduction.init", init]
 // CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i
 // CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[ALLOCA]] : !s32i, !cir.ptr
 // CHECK-NEXT: acc.yield
@@ -63,7 +64,7 @@ void acc_compute() {
 #pragma acc parallel reduction(min:someVar)
 // CHECK-NEXT: acc.reduction.recipe @reduction_min__ZTSi : !cir.ptr reduction_operator init {
 // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}})
-// CHECK-NEXT: cir.alloca !s32i, !cir.ptr, ["openacc.reduction.init", init]
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !s32i, !cir.ptr, ["openacc.reduction.init", init]
 // CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<2147483647> : !s32i
 // CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[ALLOCA]] : !s32i, !cir.ptr
 // CHECK-NEXT: acc.yield
@@ -86,7 +87,7 @@ void acc_compute() {
 #pragma acc parallel reduction(&:someVar)
 // CHECK-NEXT: acc.reduction.recipe @reduction_iand__ZTSi : !cir.ptr reduction_operator init {
 // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}})
-// CHECK-NEXT: cir.alloca !s32i, !cir.ptr, ["openacc.reduction.init", init]
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !s32i, !cir.ptr, ["openacc.reduction.init", init]
 // CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i
 // CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[ALLOCA]] : !s32i, !cir.ptr
 // CHECK-NEXT: acc.yield
@@ -189,24 +190,8 @@ void acc_compute() {
 // CHECK-NEXT: acc.reduction.recipe @reduction_add__ZTSA5_i : !cir.ptr> reduction_operator init {
 // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}})
 // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init]
-// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["arrayinit.temp"]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr
-// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr>
-// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
-// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride %[[DECAY]], %[[LAST_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr
-// CHECK-NEXT: cir.do {
-// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr
-// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i
-// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[TEMP_LOAD]] : !s32i, !cir.ptr
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i
-// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride %[[TEMP_LOAD]], %[[ONE]] : (!cir.ptr, !s64i) -> !cir.ptr
-// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr>
-// CHECK-NEXT: cir.yield
-// CHECK-NEXT: } while {
-// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr
-// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr, !cir.bool
-// CHECK-NEXT: cir.condition(%[[CMP]])
-// CHECK-NEXT: }
+// CHECK-NEXT: %[[INIT_VAL:.*]] = cir.const {{.*}} : !cir.array
+// CHECK-NEXT: cir.store {{.*}} %[[INIT_VAL]], %[[ALLOCA]] : !cir.array, !cir.ptr>
 // CHECK-NEXT: acc.yield
 //
 // CHECK-NEXT: } combiner {
@@ -243,25 +228,8 @@ void acc_compute() {
 // CHECK-NEXT: acc.reduction.recipe @reduction_mul__ZTSA5_i : !cir.ptr> reduction_operator init {
 // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}})
 // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
-// CHECK-NEXT: cir.store{{.*}} %[[ONE]], %[[DECAY]] : !s32i, !cir.ptr
-// CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i
-// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[ONE_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
-// CHECK-NEXT: cir.store{{.*}} %[[ONE]], %[[NEXT_ELT]] : !s32i, !cir.ptr
-// CHECK-NEXT: %[[TWO_IDX:.*]] = cir.const #cir.int<2> : !s64i
-// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[TWO_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
-// CHECK-NEXT: cir.store{{.*}} %[[ONE]], %[[NEXT_ELT]] : !s32i, !cir.ptr
-// CHECK-NEXT: %[[THREE_IDX:.*]] = cir.const #cir.int<3> : !s64i
-// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[THREE_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
-// CHECK-NEXT: cir.store{{.*}} %[[ONE]], %[[NEXT_ELT]] : !s32i, !cir.ptr
-// CHECK-NEXT: %[[FOUR_IDX:.*]] = cir.const #cir.int<4> : !s64i
-// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[FOUR_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
-// CHECK-NEXT: cir.store{{.*}} %[[ONE]], %[[NEXT_ELT]] : !s32i, !cir.ptr
+// CHECK-NEXT: %[[INIT_VAL:.*]] = cir.const {{.*}} : !cir.array
+// CHECK-NEXT: cir.store {{.*}} %[[INIT_VAL]], %[[ALLOCA]] : !cir.array, !cir.ptr>
 // CHECK-NEXT: acc.yield
 //
 // CHECK-NEXT: } combiner {
@@ -297,26 +265,9 @@ void acc_compute() {
 #pragma acc parallel reduction(max:someVarArr)
 // CHECK-NEXT: acc.reduction.recipe @reduction_max__ZTSA5_i : !cir.ptr> reduction_operator init {
 // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}})
-// CHECK-NEXT: cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr
-// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i
-// CHECK-NEXT: cir.store{{.*}} %[[LEAST]], %[[DECAY]] : !s32i, !cir.ptr
-// CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i
-// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[ONE_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr
-// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i
-// CHECK-NEXT: cir.store{{.*}} %[[LEAST]], %[[NEXT_ELT]] : !s32i, !cir.ptr
-// CHECK-NEXT: %[[TWO_IDX:.*]] = cir.const #cir.int<2> : !s64i
-// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[TWO_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr
-// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i
-// CHECK-NEXT: cir.store{{.*}} %[[LEAST]], %[[NEXT_ELT]] : !s32i, !cir.ptr
-// CHECK-NEXT: %[[THREE_IDX:.*]] = cir.const #cir.int<3> : !s64i
-// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[THREE_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr
-// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i
-// CHECK-NEXT: cir.store{{.*}} %[[LEAST]], %[[NEXT_ELT]] : !s32i, !cir.ptr
-// CHECK-NEXT: %[[FOUR_IDX:.*]] = cir.const #cir.int<4> : !s64i
-// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[FOUR_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr
-// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i
-// CHECK-NEXT: cir.store{{.*}} %[[LEAST]], %[[NEXT_ELT]] : !s32i, !cir.ptr
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init]
+// CHECK-NEXT: %[[INIT_VAL:.*]] = cir.const {{.*}} : !cir.array
+// CHECK-NEXT: cir.store {{.*}} %[[INIT_VAL]], %[[ALLOCA]] : !cir.array, !cir.ptr>
 // CHECK-NEXT: acc.yield
 //
 // CHECK-NEXT: } combiner {
@@ -359,26 +310,9 @@ void acc_compute() {
 #pragma acc parallel reduction(min:someVarArr)
 // CHECK-NEXT: acc.reduction.recipe @reduction_min__ZTSA5_i : !cir.ptr> reduction_operator init {
 // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}})
-// CHECK-NEXT: cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr
-// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<2147483647> : !s32i
-// CHECK-NEXT: cir.store{{.*}} %[[LARGEST]], %[[DECAY]] : !s32i, !cir.ptr
-// CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i
-// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[ONE_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr
-// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<2147483647> : !s32i
-// CHECK-NEXT: cir.store{{.*}} %[[LARGEST]], %[[NEXT_ELT]] : !s32i, !cir.ptr
-// CHECK-NEXT: %[[TWO_IDX:.*]] = cir.const #cir.int<2> : !s64i
-// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[TWO_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr
-// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<2147483647> : !s32i
-// CHECK-NEXT: cir.store{{.*}} %[[LARGEST]], %[[NEXT_ELT]] : !s32i, !cir.ptr
-// CHECK-NEXT: %[[THREE_IDX:.*]] = cir.const #cir.int<3> : !s64i
-// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[THREE_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr
-// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<2147483647> : !s32i
-// CHECK-NEXT: cir.store{{.*}} %[[LARGEST]], %[[NEXT_ELT]] : !s32i, !cir.ptr
-// CHECK-NEXT: %[[FOUR_IDX:.*]] = cir.const #cir.int<4> : !s64i
-// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[FOUR_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr
-// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<2147483647> : !s32i
-// CHECK-NEXT: cir.store{{.*}} %[[LARGEST]], %[[NEXT_ELT]] : !s32i, !cir.ptr
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init]
+// CHECK-NEXT: %[[INIT_VAL:.*]] = cir.const {{.*}} : !cir.array
+// CHECK-NEXT: cir.store {{.*}} %[[INIT_VAL]], %[[ALLOCA]] : !cir.array, !cir.ptr>
 // CHECK-NEXT: acc.yield
 //
 // CHECK-NEXT: } combiner {
@@ -421,26 +355,9 @@ void acc_compute() {
 #pragma acc parallel reduction(&:someVarArr)
 // CHECK-NEXT: acc.reduction.recipe @reduction_iand__ZTSA5_i : !cir.ptr> reduction_operator init {
 // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}})
-// CHECK-NEXT: cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr
-// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i
-// CHECK-NEXT: cir.store{{.*}} %[[ALL_ONES]], %[[DECAY]] : !s32i, !cir.ptr
-// CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i
-// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[ONE_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr
-// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i
-// CHECK-NEXT: cir.store{{.*}} %[[ALL_ONES]], %[[NEXT_ELT]] : !s32i, !cir.ptr
-// CHECK-NEXT: %[[TWO_IDX:.*]] = cir.const #cir.int<2> : !s64i
-// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[TWO_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr
-// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i
-// CHECK-NEXT: cir.store{{.*}} %[[ALL_ONES]], %[[NEXT_ELT]] : !s32i, !cir.ptr
-// CHECK-NEXT: %[[THREE_IDX:.*]] = cir.const #cir.int<3> : !s64i
-// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[THREE_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr
-// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i
-// CHECK-NEXT: cir.store{{.*}} %[[ALL_ONES]], %[[NEXT_ELT]] : !s32i, !cir.ptr
-// CHECK-NEXT: %[[FOUR_IDX:.*]] = cir.const #cir.int<4> : !s64i
-// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[FOUR_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr
-// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i
-// CHECK-NEXT: cir.store{{.*}} %[[ALL_ONES]], %[[NEXT_ELT]] : !s32i, !cir.ptr
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init]
+// CHECK-NEXT: %[[INIT_VAL:.*]] = cir.const {{.*}} : !cir.array
+// CHECK-NEXT: cir.store {{.*}} %[[INIT_VAL]], %[[ALLOCA]] : !cir.array, !cir.ptr>
 // CHECK-NEXT: acc.yield
 //
 // CHECK-NEXT: } combiner {
@@ -477,24 +394,8 @@ void acc_compute() {
 // CHECK-NEXT: acc.reduction.recipe @reduction_ior__ZTSA5_i : !cir.ptr> reduction_operator init {
 // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}})
 // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init]
-// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["arrayinit.temp"]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr
-// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr>
-// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i -// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride %[[DECAY]], %[[LAST_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr -// CHECK-NEXT: cir.do { -// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[TEMP_LOAD]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i -// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride %[[TEMP_LOAD]], %[[ONE]] : (!cir.ptr, !s64i) -> !cir.ptr -// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: cir.yield -// CHECK-NEXT: } while { -// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr, !cir.bool -// CHECK-NEXT: cir.condition(%[[CMP]]) -// CHECK-NEXT: } +// CHECK-NEXT: %[[INIT_VAL:.*]] = cir.const {{.*}} : !cir.array +// CHECK-NEXT: cir.store {{.*}} %[[INIT_VAL]], %[[ALLOCA]] : !cir.array, !cir.ptr> // CHECK-NEXT: acc.yield // // CHECK-NEXT: } combiner { @@ -531,24 +432,8 @@ void acc_compute() { // CHECK-NEXT: acc.reduction.recipe @reduction_xor__ZTSA5_i : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["arrayinit.temp"] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr -// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i -// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride %[[DECAY]], %[[LAST_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr -// CHECK-NEXT: cir.do { -// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[TEMP_LOAD]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i -// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride %[[TEMP_LOAD]], %[[ONE]] : (!cir.ptr, !s64i) -> !cir.ptr -// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: cir.yield -// CHECK-NEXT: } while { -// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr, !cir.bool -// CHECK-NEXT: cir.condition(%[[CMP]]) -// CHECK-NEXT: } +// CHECK-NEXT: %[[INIT_VAL:.*]] = cir.const {{.*}} : !cir.array +// CHECK-NEXT: cir.store {{.*}} %[[INIT_VAL]], %[[ALLOCA]] : !cir.array, !cir.ptr> // CHECK-NEXT: acc.yield // // CHECK-NEXT: } combiner { @@ -585,25 +470,8 @@ void acc_compute() { // CHECK-NEXT: acc.reduction.recipe @reduction_land__ZTSA5_i : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i -// CHECK-NEXT: cir.store{{.*}} %[[ONE]], %[[DECAY]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[ONE_IDX]] : 
(!cir.ptr, !s64i) -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i -// CHECK-NEXT: cir.store{{.*}} %[[ONE]], %[[NEXT_ELT]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[TWO_IDX:.*]] = cir.const #cir.int<2> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[TWO_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i -// CHECK-NEXT: cir.store{{.*}} %[[ONE]], %[[NEXT_ELT]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[THREE_IDX:.*]] = cir.const #cir.int<3> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[THREE_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i -// CHECK-NEXT: cir.store{{.*}} %[[ONE]], %[[NEXT_ELT]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[FOUR_IDX:.*]] = cir.const #cir.int<4> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[FOUR_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i -// CHECK-NEXT: cir.store{{.*}} %[[ONE]], %[[NEXT_ELT]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[INIT_VAL:.*]] = cir.const {{.*}} : !cir.array +// CHECK-NEXT: cir.store {{.*}} %[[INIT_VAL]], %[[ALLOCA]] : !cir.array, !cir.ptr> // CHECK-NEXT: acc.yield // // CHECK-NEXT: } combiner { @@ -650,24 +518,8 @@ void acc_compute() { // CHECK-NEXT: acc.reduction.recipe @reduction_lor__ZTSA5_i : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["arrayinit.temp"] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr -// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i -// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride %[[DECAY]], %[[LAST_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr -// CHECK-NEXT: cir.do { -// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[TEMP_LOAD]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i -// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride %[[TEMP_LOAD]], %[[ONE]] : (!cir.ptr, !s64i) -> !cir.ptr -// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: cir.yield -// CHECK-NEXT: } while { -// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr, !cir.bool -// CHECK-NEXT: cir.condition(%[[CMP]]) -// CHECK-NEXT: } +// CHECK-NEXT: %[[INIT_VAL:.*]] = cir.const {{.*}} : !cir.array +// CHECK-NEXT: cir.store {{.*}} %[[INIT_VAL]], %[[ALLOCA]] : !cir.array, !cir.ptr> // CHECK-NEXT: acc.yield // // CHECK-NEXT: } combiner { diff --git a/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-int.cpp b/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-int.cpp index fb6984fcd0068..db1b18e3fb8b7 100644 --- a/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-int.cpp +++ b/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-int.cpp @@ -1,4 +1,5 @@ -// RUN: %clang_cc1 -fopenacc -triple x86_64-linux-gnu -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -triple x86_64-linux-pc %s -o - | FileCheck %s +// RUN: %clang_cc1 -fopenacc -triple 
x86_64-linux-gnu -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -triple x86_64-linux-pc %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s template void acc_compute() { @@ -41,7 +42,7 @@ void acc_compute() { #pragma acc parallel reduction(max:someVar) // CHECK-NEXT: acc.reduction.recipe @reduction_max__ZTSi : !cir.ptr reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: cir.alloca !s32i, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !s32i, !cir.ptr, ["openacc.reduction.init", init] // CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i // CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[ALLOCA]] : !s32i, !cir.ptr // CHECK-NEXT: acc.yield @@ -64,7 +65,7 @@ void acc_compute() { #pragma acc parallel reduction(min:someVar) // CHECK-NEXT: acc.reduction.recipe @reduction_min__ZTSi : !cir.ptr reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: cir.alloca !s32i, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !s32i, !cir.ptr, ["openacc.reduction.init", init] // CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<2147483647> : !s32i // CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[ALLOCA]] : !s32i, !cir.ptr // CHECK-NEXT: acc.yield @@ -87,7 +88,7 @@ void acc_compute() { #pragma acc parallel reduction(&:someVar) // CHECK-NEXT: acc.reduction.recipe @reduction_iand__ZTSi : !cir.ptr reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: cir.alloca !s32i, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !s32i, !cir.ptr, ["openacc.reduction.init", init] // CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i // CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[ALLOCA]] : !s32i, !cir.ptr // CHECK-NEXT: acc.yield @@ -190,24 +191,8 @@ void acc_compute() { // CHECK-NEXT: acc.reduction.recipe @reduction_add__ZTSA5_i : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["arrayinit.temp"] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr -// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i -// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride %[[DECAY]], %[[LAST_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr -// CHECK-NEXT: cir.do { -// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[TEMP_LOAD]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i -// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride %[[TEMP_LOAD]], %[[ONE]] : (!cir.ptr, !s64i) -> !cir.ptr -// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: cir.yield -// CHECK-NEXT: } while { -// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr, !cir.bool -// CHECK-NEXT: cir.condition(%[[CMP]]) -// CHECK-NEXT: } +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.zero : !cir.array +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[ALLOCA]] : !cir.array, !cir.ptr> // 
CHECK-NEXT: acc.yield // // CHECK-NEXT: } combiner { @@ -244,25 +229,8 @@ void acc_compute() { // CHECK-NEXT: acc.reduction.recipe @reduction_mul__ZTSA5_i : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i -// CHECK-NEXT: cir.store{{.*}} %[[ONE]], %[[DECAY]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[ONE_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i -// CHECK-NEXT: cir.store{{.*}} %[[ONE]], %[[NEXT_ELT]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[TWO_IDX:.*]] = cir.const #cir.int<2> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[TWO_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i -// CHECK-NEXT: cir.store{{.*}} %[[ONE]], %[[NEXT_ELT]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[THREE_IDX:.*]] = cir.const #cir.int<3> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[THREE_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i -// CHECK-NEXT: cir.store{{.*}} %[[ONE]], %[[NEXT_ELT]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[FOUR_IDX:.*]] = cir.const #cir.int<4> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[FOUR_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i -// CHECK-NEXT: cir.store{{.*}} %[[ONE]], %[[NEXT_ELT]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[CONST_ARR:.*]] = cir.const #cir.const_array<[#cir.int<1> : !s32i, #cir.int<1> : !s32i, #cir.int<1> : !s32i, #cir.int<1> : !s32i, #cir.int<1> : !s32i]> : !cir.array +// CHECK-NEXT: cir.store {{.*}} %[[CONST_ARR]], %[[ALLOCA]] : !cir.array, !cir.ptr> // CHECK-NEXT: acc.yield // // CHECK-NEXT: } combiner { @@ -298,26 +266,9 @@ void acc_compute() { #pragma acc parallel reduction(max:someVarArr) // CHECK-NEXT: acc.reduction.recipe @reduction_max__ZTSA5_i : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) -// CHECK-NEXT: cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i -// CHECK-NEXT: cir.store{{.*}} %[[LEAST]], %[[DECAY]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[ONE_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i -// CHECK-NEXT: cir.store{{.*}} %[[LEAST]], %[[NEXT_ELT]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[TWO_IDX:.*]] = cir.const #cir.int<2> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[TWO_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i -// CHECK-NEXT: cir.store{{.*}} %[[LEAST]], %[[NEXT_ELT]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[THREE_IDX:.*]] = cir.const #cir.int<3> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[THREE_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> 
: !s32i -// CHECK-NEXT: cir.store{{.*}} %[[LEAST]], %[[NEXT_ELT]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[FOUR_IDX:.*]] = cir.const #cir.int<4> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[FOUR_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i -// CHECK-NEXT: cir.store{{.*}} %[[LEAST]], %[[NEXT_ELT]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[CONST_ARR:.*]] = cir.const #cir.const_array<[#cir.int<-2147483648> : !s32i, #cir.int<-2147483648> : !s32i, #cir.int<-2147483648> : !s32i, #cir.int<-2147483648> : !s32i, #cir.int<-2147483648> : !s32i]> : !cir.array +// CHECK-NEXT: cir.store {{.*}} %[[CONST_ARR]], %[[ALLOCA]] : !cir.array, !cir.ptr> // CHECK-NEXT: acc.yield // // CHECK-NEXT: } combiner { @@ -360,26 +311,9 @@ void acc_compute() { #pragma acc parallel reduction(min:someVarArr) // CHECK-NEXT: acc.reduction.recipe @reduction_min__ZTSA5_i : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) -// CHECK-NEXT: cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<2147483647> : !s32i -// CHECK-NEXT: cir.store{{.*}} %[[LARGEST]], %[[DECAY]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[ONE_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<2147483647> : !s32i -// CHECK-NEXT: cir.store{{.*}} %[[LARGEST]], %[[NEXT_ELT]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[TWO_IDX:.*]] = cir.const #cir.int<2> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[TWO_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<2147483647> : !s32i -// CHECK-NEXT: cir.store{{.*}} %[[LARGEST]], %[[NEXT_ELT]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[THREE_IDX:.*]] = cir.const #cir.int<3> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[THREE_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<2147483647> : !s32i -// CHECK-NEXT: cir.store{{.*}} %[[LARGEST]], %[[NEXT_ELT]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[FOUR_IDX:.*]] = cir.const #cir.int<4> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[FOUR_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<2147483647> : !s32i -// CHECK-NEXT: cir.store{{.*}} %[[LARGEST]], %[[NEXT_ELT]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[CONST_ARR:.*]] = cir.const #cir.const_array<[#cir.int<2147483647> : !s32i, #cir.int<2147483647> : !s32i, #cir.int<2147483647> : !s32i, #cir.int<2147483647> : !s32i, #cir.int<2147483647> : !s32i]> : !cir.array +// CHECK-NEXT: cir.store {{.*}} %[[CONST_ARR]], %[[ALLOCA]] : !cir.array, !cir.ptr> // CHECK-NEXT: acc.yield // // CHECK-NEXT: } combiner { @@ -422,26 +356,9 @@ void acc_compute() { #pragma acc parallel reduction(&:someVarArr) // CHECK-NEXT: acc.reduction.recipe @reduction_iand__ZTSA5_i : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) -// CHECK-NEXT: cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[DECAY:.*]] 
= cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i -// CHECK-NEXT: cir.store{{.*}} %[[ALL_ONES]], %[[DECAY]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[ONE_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i -// CHECK-NEXT: cir.store{{.*}} %[[ALL_ONES]], %[[NEXT_ELT]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[TWO_IDX:.*]] = cir.const #cir.int<2> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[TWO_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i -// CHECK-NEXT: cir.store{{.*}} %[[ALL_ONES]], %[[NEXT_ELT]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[THREE_IDX:.*]] = cir.const #cir.int<3> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[THREE_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i -// CHECK-NEXT: cir.store{{.*}} %[[ALL_ONES]], %[[NEXT_ELT]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[FOUR_IDX:.*]] = cir.const #cir.int<4> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[FOUR_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i -// CHECK-NEXT: cir.store{{.*}} %[[ALL_ONES]], %[[NEXT_ELT]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[CONST_ARR:.*]] = cir.const #cir.const_array<[#cir.int<-1> : !s32i, #cir.int<-1> : !s32i, #cir.int<-1> : !s32i, #cir.int<-1> : !s32i, #cir.int<-1> : !s32i]> : !cir.array +// CHECK-NEXT: cir.store {{.*}} %[[CONST_ARR]], %[[ALLOCA]] : !cir.array, !cir.ptr> // CHECK-NEXT: acc.yield // // CHECK-NEXT: } combiner { @@ -478,24 +395,8 @@ void acc_compute() { // CHECK-NEXT: acc.reduction.recipe @reduction_ior__ZTSA5_i : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["arrayinit.temp"] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr -// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i -// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride %[[DECAY]], %[[LAST_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr -// CHECK-NEXT: cir.do { -// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[TEMP_LOAD]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i -// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride %[[TEMP_LOAD]], %[[ONE]] : (!cir.ptr, !s64i) -> !cir.ptr -// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: cir.yield -// CHECK-NEXT: } while { -// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr, !cir.bool -// CHECK-NEXT: cir.condition(%[[CMP]]) -// CHECK-NEXT: } +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.zero : !cir.array +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]],
%[[ALLOCA]] : !cir.array, !cir.ptr> // CHECK-NEXT: acc.yield // // CHECK-NEXT: } combiner { @@ -532,24 +433,8 @@ void acc_compute() { // CHECK-NEXT: acc.reduction.recipe @reduction_xor__ZTSA5_i : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["arrayinit.temp"] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr -// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i -// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride %[[DECAY]], %[[LAST_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr -// CHECK-NEXT: cir.do { -// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[TEMP_LOAD]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i -// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride %[[TEMP_LOAD]], %[[ONE]] : (!cir.ptr, !s64i) -> !cir.ptr -// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: cir.yield -// CHECK-NEXT: } while { -// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr, !cir.bool -// CHECK-NEXT: cir.condition(%[[CMP]]) -// CHECK-NEXT: } +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.zero : !cir.array +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[ALLOCA]] : !cir.array, !cir.ptr> // CHECK-NEXT: acc.yield // // CHECK-NEXT: } combiner { @@ -586,25 +471,8 @@ void acc_compute() { // CHECK-NEXT: acc.reduction.recipe @reduction_land__ZTSA5_i : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i -// CHECK-NEXT: cir.store{{.*}} %[[ONE]], %[[DECAY]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[ONE_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i -// CHECK-NEXT: cir.store{{.*}} %[[ONE]], %[[NEXT_ELT]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[TWO_IDX:.*]] = cir.const #cir.int<2> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[TWO_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i -// CHECK-NEXT: cir.store{{.*}} %[[ONE]], %[[NEXT_ELT]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[THREE_IDX:.*]] = cir.const #cir.int<3> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[THREE_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i -// CHECK-NEXT: cir.store{{.*}} %[[ONE]], %[[NEXT_ELT]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[FOUR_IDX:.*]] = cir.const #cir.int<4> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[FOUR_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i -// CHECK-NEXT: cir.store{{.*}}
%[[ONE]], %[[NEXT_ELT]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[CONST:.*]] = cir.const #cir.const_array<[#cir.int<1> : !s32i, #cir.int<1> : !s32i, #cir.int<1> : !s32i, #cir.int<1> : !s32i, #cir.int<1> : !s32i]> : !cir.array +// CHECK-NEXT: cir.store {{.*}} %[[CONST]], %[[ALLOCA]] : !cir.array, !cir.ptr> // CHECK-NEXT: acc.yield // // CHECK-NEXT: } combiner { @@ -651,24 +519,8 @@ void acc_compute() { // CHECK-NEXT: acc.reduction.recipe @reduction_lor__ZTSA5_i : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["arrayinit.temp"] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr -// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i -// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride %[[DECAY]], %[[LAST_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr -// CHECK-NEXT: cir.do { -// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[TEMP_LOAD]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i -// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride %[[TEMP_LOAD]], %[[ONE]] : (!cir.ptr, !s64i) -> !cir.ptr -// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: cir.yield -// CHECK-NEXT: } while { -// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr, !cir.bool -// CHECK-NEXT: cir.condition(%[[CMP]]) -// CHECK-NEXT: } +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.zero : !cir.array +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[ALLOCA]] : !cir.array, !cir.ptr> // CHECK-NEXT: acc.yield // // CHECK-NEXT: } combiner { diff --git a/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-unsigned-int.c b/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-unsigned-int.c index 9b10a296e99f5..54784f35266d5 100644 --- a/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-unsigned-int.c +++ b/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-unsigned-int.c @@ -1,4 +1,5 @@ -// RUN: %clang_cc1 -fopenacc -triple x86_64-linux-gnu -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -triple x86_64-linux-pc %s -o - | FileCheck %s +// RUN: %clang_cc1 -fopenacc -triple x86_64-linux-gnu -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -triple x86_64-linux-pc %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s void acc_compute() { unsigned int someVar; @@ -40,7 +41,7 @@ void acc_compute() { #pragma acc parallel reduction(max:someVar) // CHECK-NEXT: acc.reduction.recipe @reduction_max__ZTSj : !cir.ptr reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: cir.alloca !u32i, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !u32i, !cir.ptr, ["openacc.reduction.init", init] // CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<0> : !u32i // CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[ALLOCA]] : !u32i, !cir.ptr // CHECK-NEXT: acc.yield @@ -63,7 +64,7 @@ void acc_compute() { #pragma acc parallel reduction(min:someVar) // CHECK-NEXT: acc.reduction.recipe
@reduction_min__ZTSj : !cir.ptr reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: cir.alloca !u32i, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !u32i, !cir.ptr, ["openacc.reduction.init", init] // CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<4294967295> : !u32i // CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[ALLOCA]] : !u32i, !cir.ptr // CHECK-NEXT: acc.yield @@ -86,7 +87,7 @@ void acc_compute() { #pragma acc parallel reduction(&:someVar) // CHECK-NEXT: acc.reduction.recipe @reduction_iand__ZTSj : !cir.ptr reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: cir.alloca !u32i, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !u32i, !cir.ptr, ["openacc.reduction.init", init] // CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<4294967295> : !u32i // CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[ALLOCA]] : !u32i, !cir.ptr // CHECK-NEXT: acc.yield @@ -190,24 +191,8 @@ void acc_compute() { // CHECK-NEXT: acc.reduction.recipe @reduction_add__ZTSA5_j : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["arrayinit.temp"] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr -// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i -// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride %[[DECAY]], %[[LAST_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr -// CHECK-NEXT: cir.do { -// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[TEMP_LOAD]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i -// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride %[[TEMP_LOAD]], %[[ONE]] : (!cir.ptr, !s64i) -> !cir.ptr -// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: cir.yield -// CHECK-NEXT: } while { -// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr, !cir.bool -// CHECK-NEXT: cir.condition(%[[CMP]]) -// CHECK-NEXT: } +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.zero : !cir.array +// CHECK-NEXT: cir.store{{.*}} %[[ZERO]], %[[ALLOCA]] : !cir.array, !cir.ptr> // CHECK-NEXT: acc.yield // // CHECK-NEXT: } combiner { @@ -244,25 +229,8 @@ void acc_compute() { // CHECK-NEXT: acc.reduction.recipe @reduction_mul__ZTSA5_j : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i -// CHECK-NEXT: cir.store{{.*}} %[[ONE]], %[[DECAY]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[ONE_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i -// CHECK-NEXT: cir.store{{.*}} %[[ONE]], %[[NEXT_ELT]] : !u32i, 
!cir.ptr -// CHECK-NEXT: %[[TWO_IDX:.*]] = cir.const #cir.int<2> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[TWO_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i -// CHECK-NEXT: cir.store{{.*}} %[[ONE]], %[[NEXT_ELT]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[THREE_IDX:.*]] = cir.const #cir.int<3> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[THREE_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i -// CHECK-NEXT: cir.store{{.*}} %[[ONE]], %[[NEXT_ELT]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[FOUR_IDX:.*]] = cir.const #cir.int<4> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[FOUR_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i -// CHECK-NEXT: cir.store{{.*}} %[[ONE]], %[[NEXT_ELT]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[CONST_ARRAY:.*]] = cir.const #cir.const_array<[#cir.int<1> : !u32i, #cir.int<1> : !u32i, #cir.int<1> : !u32i, #cir.int<1> : !u32i, #cir.int<1> : !u32i]> : !cir.array +// CHECK-NEXT: cir.store{{.*}} %[[CONST_ARRAY]], %[[ALLOCA]] : !cir.array, !cir.ptr> // CHECK-NEXT: acc.yield // // CHECK-NEXT: } combiner { @@ -298,26 +266,9 @@ void acc_compute() { #pragma acc parallel reduction(max:someVarArr) // CHECK-NEXT: acc.reduction.recipe @reduction_max__ZTSA5_j : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) -// CHECK-NEXT: cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<0> : !u32i -// CHECK-NEXT: cir.store{{.*}} %[[LEAST]], %[[DECAY]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[ONE_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<0> : !u32i -// CHECK-NEXT: cir.store{{.*}} %[[LEAST]], %[[NEXT_ELT]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[TWO_IDX:.*]] = cir.const #cir.int<2> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[TWO_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<0> : !u32i -// CHECK-NEXT: cir.store{{.*}} %[[LEAST]], %[[NEXT_ELT]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[THREE_IDX:.*]] = cir.const #cir.int<3> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[THREE_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<0> : !u32i -// CHECK-NEXT: cir.store{{.*}} %[[LEAST]], %[[NEXT_ELT]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[FOUR_IDX:.*]] = cir.const #cir.int<4> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[FOUR_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<0> : !u32i -// CHECK-NEXT: cir.store{{.*}} %[[LEAST]], %[[NEXT_ELT]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[CONST_ARRAY:.*]] = cir.const #cir.zero : !cir.array +// CHECK-NEXT: cir.store {{.*}} %[[CONST_ARRAY]], %[[ALLOCA]] : !cir.array, !cir.ptr> // CHECK-NEXT: acc.yield // // CHECK-NEXT: } combiner { @@ -360,26 +311,9 @@ void acc_compute() { #pragma acc parallel reduction(min:someVarArr) // CHECK-NEXT: acc.reduction.recipe @reduction_min__ZTSA5_j : !cir.ptr> reduction_operator init { // 
CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) -// CHECK-NEXT: cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<4294967295> : !u32i -// CHECK-NEXT: cir.store{{.*}} %[[LARGEST]], %[[DECAY]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[ONE_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<4294967295> : !u32i -// CHECK-NEXT: cir.store{{.*}} %[[LARGEST]], %[[NEXT_ELT]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[TWO_IDX:.*]] = cir.const #cir.int<2> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[TWO_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<4294967295> : !u32i -// CHECK-NEXT: cir.store{{.*}} %[[LARGEST]], %[[NEXT_ELT]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[THREE_IDX:.*]] = cir.const #cir.int<3> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[THREE_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<4294967295> : !u32i -// CHECK-NEXT: cir.store{{.*}} %[[LARGEST]], %[[NEXT_ELT]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[FOUR_IDX:.*]] = cir.const #cir.int<4> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[FOUR_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<4294967295> : !u32i -// CHECK-NEXT: cir.store{{.*}} %[[LARGEST]], %[[NEXT_ELT]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[CONST_ARRAY:.*]] = cir.const #cir.const_array<[#cir.int<4294967295> : !u32i, #cir.int<4294967295> : !u32i, #cir.int<4294967295> : !u32i, #cir.int<4294967295> : !u32i, #cir.int<4294967295> : !u32i]> : !cir.array +// CHECK-NEXT: cir.store{{.*}} %[[CONST_ARRAY]], %[[ALLOCA]] : !cir.array, !cir.ptr> // CHECK-NEXT: acc.yield // // CHECK-NEXT: } combiner { @@ -422,26 +356,9 @@ void acc_compute() { #pragma acc parallel reduction(&:someVarArr) // CHECK-NEXT: acc.reduction.recipe @reduction_iand__ZTSA5_j : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) -// CHECK-NEXT: cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<4294967295> : !u32i -// CHECK-NEXT: cir.store{{.*}} %[[ALL_ONES]], %[[DECAY]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[ONE_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<4294967295> : !u32i -// CHECK-NEXT: cir.store{{.*}} %[[ALL_ONES]], %[[NEXT_ELT]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[TWO_IDX:.*]] = cir.const #cir.int<2> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[TWO_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<4294967295> : !u32i -// CHECK-NEXT: cir.store{{.*}} %[[ALL_ONES]], %[[NEXT_ELT]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[THREE_IDX:.*]] = cir.const #cir.int<3> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[THREE_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr -// CHECK-NEXT: 
%[[ALL_ONES:.*]] = cir.const #cir.int<4294967295> : !u32i -// CHECK-NEXT: cir.store{{.*}} %[[ALL_ONES]], %[[NEXT_ELT]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[FOUR_IDX:.*]] = cir.const #cir.int<4> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[FOUR_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<4294967295> : !u32i -// CHECK-NEXT: cir.store{{.*}} %[[ALL_ONES]], %[[NEXT_ELT]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[CONST_ARRAY:.*]] = cir.const #cir.const_array<[#cir.int<4294967295> : !u32i, #cir.int<4294967295> : !u32i, #cir.int<4294967295> : !u32i, #cir.int<4294967295> : !u32i, #cir.int<4294967295> : !u32i]> : !cir.array +// CHECK-NEXT: cir.store{{.*}} %[[CONST_ARRAY]], %[[ALLOCA]] : !cir.array, !cir.ptr> // CHECK-NEXT: acc.yield // // CHECK-NEXT: } combiner { @@ -478,24 +395,8 @@ void acc_compute() { // CHECK-NEXT: acc.reduction.recipe @reduction_ior__ZTSA5_j : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["arrayinit.temp"] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr -// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i -// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride %[[DECAY]], %[[LAST_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr -// CHECK-NEXT: cir.do { -// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[TEMP_LOAD]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i -// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride %[[TEMP_LOAD]], %[[ONE]] : (!cir.ptr, !s64i) -> !cir.ptr -// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: cir.yield -// CHECK-NEXT: } while { -// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr, !cir.bool -// CHECK-NEXT: cir.condition(%[[CMP]]) -// CHECK-NEXT: } +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.zero : !cir.array +// CHECK-NEXT: cir.store{{.*}} %[[ZERO]], %[[ALLOCA]] : !cir.array, !cir.ptr> // CHECK-NEXT: acc.yield // // CHECK-NEXT: } combiner { @@ -532,24 +433,8 @@ void acc_compute() { // CHECK-NEXT: acc.reduction.recipe @reduction_xor__ZTSA5_j : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["arrayinit.temp"] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr -// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i -// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride %[[DECAY]], %[[LAST_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr -// CHECK-NEXT: cir.do { -// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u32i -// 
CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[TEMP_LOAD]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i -// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride %[[TEMP_LOAD]], %[[ONE]] : (!cir.ptr, !s64i) -> !cir.ptr -// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: cir.yield -// CHECK-NEXT: } while { -// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr, !cir.bool -// CHECK-NEXT: cir.condition(%[[CMP]]) -// CHECK-NEXT: } +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.zero : !cir.array +// CHECK-NEXT: cir.store{{.*}} %[[ZERO]], %[[ALLOCA]] : !cir.array, !cir.ptr> // CHECK-NEXT: acc.yield // // CHECK-NEXT: } combiner { @@ -586,25 +471,8 @@ void acc_compute() { // CHECK-NEXT: acc.reduction.recipe @reduction_land__ZTSA5_j : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i -// CHECK-NEXT: cir.store{{.*}} %[[ONE]], %[[DECAY]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[ONE_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i -// CHECK-NEXT: cir.store{{.*}} %[[ONE]], %[[NEXT_ELT]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[TWO_IDX:.*]] = cir.const #cir.int<2> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[TWO_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i -// CHECK-NEXT: cir.store{{.*}} %[[ONE]], %[[NEXT_ELT]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[THREE_IDX:.*]] = cir.const #cir.int<3> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[THREE_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i -// CHECK-NEXT: cir.store{{.*}} %[[ONE]], %[[NEXT_ELT]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[FOUR_IDX:.*]] = cir.const #cir.int<4> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[FOUR_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i -// CHECK-NEXT: cir.store{{.*}} %[[ONE]], %[[NEXT_ELT]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[CONST_ARRAY:.*]] = cir.const #cir.const_array<[#cir.int<1> : !u32i, #cir.int<1> : !u32i, #cir.int<1> : !u32i, #cir.int<1> : !u32i, #cir.int<1> : !u32i]> : !cir.array +// CHECK-NEXT: cir.store{{.*}} %[[CONST_ARRAY]], %[[ALLOCA]] : !cir.array, !cir.ptr> // CHECK-NEXT: acc.yield // // CHECK-NEXT: } combiner { @@ -652,24 +520,8 @@ void acc_compute() { // CHECK-NEXT: acc.reduction.recipe @reduction_lor__ZTSA5_j : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["arrayinit.temp"] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr -// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i -// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride 
%[[DECAY]], %[[LAST_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr -// CHECK-NEXT: cir.do { -// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[TEMP_LOAD]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i -// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride %[[TEMP_LOAD]], %[[ONE]] : (!cir.ptr, !s64i) -> !cir.ptr -// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: cir.yield -// CHECK-NEXT: } while { -// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr, !cir.bool -// CHECK-NEXT: cir.condition(%[[CMP]]) -// CHECK-NEXT: } +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.zero : !cir.array +// CHECK-NEXT: cir.store{{.*}} %[[ZERO]], %[[ALLOCA]] : !cir.array, !cir.ptr> // CHECK-NEXT: acc.yield // // CHECK-NEXT: } combiner { diff --git a/clang/test/CIR/CodeGenOpenACC/loop-reduction-clause-default-ops.cpp b/clang/test/CIR/CodeGenOpenACC/loop-reduction-clause-default-ops.cpp index 11ebd7b4c26cb..a6f9e33bc25e0 100644 --- a/clang/test/CIR/CodeGenOpenACC/loop-reduction-clause-default-ops.cpp +++ b/clang/test/CIR/CodeGenOpenACC/loop-reduction-clause-default-ops.cpp @@ -1,4 +1,5 @@ -// RUN: %clang_cc1 -fopenacc -triple x86_64-linux-gnu -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -triple x86_64-linux-pc %s -o - | FileCheck %s +// RUN: %clang_cc1 -fopenacc -triple x86_64-linux-gnu -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -triple x86_64-linux-pc %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s struct DefaultOperators { int i; @@ -24,21 +25,8 @@ void acc_loop() { // CHECK: acc.reduction.recipe @reduction_add__ZTS16DefaultOperators : !cir.ptr reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_DefaultOperators, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][3] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][4] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #false -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.zero : !rec_DefaultOperators +// CHECK-NEXT: cir.store{{.*}} %[[ZERO]], %[[ALLOCA]] : !rec_DefaultOperators, !cir.ptr // CHECK-NEXT: acc.yield // // CHECK-NEXT: } combiner { @@ -83,21 +71,8 @@ void acc_loop() { // CHECK-NEXT: 
acc.reduction.recipe @reduction_mul__ZTS16DefaultOperators : !cir.ptr reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_DefaultOperators, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][3] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][4] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: %[[CONST:.*]] = cir.const #cir.const_record<{#cir.int<1> : !s32i, #cir.int<1> : !u32i, #cir.fp<1{{.*}}> : !cir.float, #cir.fp<1{{.*}}> : !cir.double, #true}> : !rec_DefaultOperators +// CHECK-NEXT: cir.store{{.*}} %[[CONST]], %[[ALLOCA]] : !rec_DefaultOperators, !cir.ptr // CHECK-NEXT: acc.yield // // CHECK-NEXT: } combiner { @@ -142,21 +117,8 @@ void acc_loop() { // CHECK-NEXT: acc.reduction.recipe @reduction_max__ZTS16DefaultOperators : !cir.ptr reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_DefaultOperators, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<0> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-3.4{{.*}}E+38> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][3] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-1.7{{.*}}E+308> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][4] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #false -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: %[[CONST:.*]] = cir.const #cir.const_record<{#cir.int<-2147483648> : !s32i, #cir.int<0> : !u32i, #cir.fp<-3.4{{.*}}E+38> : !cir.float, #cir.fp<-1.7{{.*}}E+308> : !cir.double, #false}> : !rec_DefaultOperators +// CHECK-NEXT: cir.store{{.*}} 
%[[CONST]], %[[ALLOCA]] : !rec_DefaultOperators, !cir.ptr // CHECK-NEXT: acc.yield // // CHECK-NEXT: } combiner { @@ -240,21 +202,8 @@ void acc_loop() { // CHECK-NEXT: acc.reduction.recipe @reduction_min__ZTS16DefaultOperators : !cir.ptr reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_DefaultOperators, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<2147483647> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<4294967295> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][3] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<1.7{{.*}}E+308> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][4] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: %[[CONST:.*]] = cir.const #cir.const_record<{#cir.int<2147483647> : !s32i, #cir.int<4294967295> : !u32i, #cir.fp<3.4{{.*}}E+38> : !cir.float, #cir.fp<1.7{{.*}}E+308> : !cir.double, #true}> : !rec_DefaultOperators +// CHECK-NEXT: cir.store{{.*}} %[[CONST]], %[[ALLOCA]] : !rec_DefaultOperators, !cir.ptr // CHECK-NEXT: acc.yield // // CHECK-NEXT: } combiner { @@ -338,15 +287,8 @@ void acc_loop() { // CHECK-NEXT: acc.reduction.recipe @reduction_iand__ZTS24DefaultOperatorsNoFloats : !cir.ptr reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_DefaultOperatorsNoFloats, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<4294967295> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][2] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: %[[CONST:.*]] = cir.const #cir.const_record<{#cir.int<-1> : !s32i, #cir.int<4294967295> : !u32i, #true}> : !rec_DefaultOperatorsNoFloats +// CHECK-NEXT: cir.store{{.*}} %[[CONST]], %[[ALLOCA]] : !rec_DefaultOperatorsNoFloats, !cir.ptr // CHECK-NEXT: acc.yield // // CHECK-NEXT: } combiner { @@ -379,15 +321,8 @@ void acc_loop() { // CHECK-NEXT: acc.reduction.recipe @reduction_ior__ZTS24DefaultOperatorsNoFloats : !cir.ptr reduction_operator init { // 
CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_DefaultOperatorsNoFloats, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][2] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #false -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.zero : !rec_DefaultOperatorsNoFloats +// CHECK-NEXT: cir.store{{.*}} %[[ZERO]], %[[ALLOCA]] : !rec_DefaultOperatorsNoFloats, !cir.ptr // CHECK-NEXT: acc.yield // // CHECK-NEXT: } combiner { @@ -420,15 +355,8 @@ void acc_loop() { // CHECK-NEXT: acc.reduction.recipe @reduction_xor__ZTS24DefaultOperatorsNoFloats : !cir.ptr reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_DefaultOperatorsNoFloats, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][2] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #false -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.zero : !rec_DefaultOperatorsNoFloats +// CHECK-NEXT: cir.store{{.*}} %[[ZERO]], %[[ALLOCA]] : !rec_DefaultOperatorsNoFloats, !cir.ptr // CHECK-NEXT: acc.yield // // CHECK-NEXT: } combiner { @@ -461,21 +389,8 @@ void acc_loop() { // CHECK-NEXT: acc.reduction.recipe @reduction_land__ZTS16DefaultOperators : !cir.ptr reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_DefaultOperators, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][3] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} 
%[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][4] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: %[[CONST:.*]] = cir.const #cir.const_record<{#cir.int<1> : !s32i, #cir.int<1> : !u32i, #cir.fp<1{{.*}}> : !cir.float, #cir.fp<1{{.*}}> : !cir.double, #true}> : !rec_DefaultOperators +// CHECK-NEXT: cir.store{{.*}} %[[CONST]], %[[ALLOCA]] : !rec_DefaultOperators, !cir.ptr // CHECK-NEXT: acc.yield // // CHECK-NEXT: } combiner { @@ -558,21 +473,8 @@ void acc_loop() { // CHECK-NEXT: acc.reduction.recipe @reduction_lor__ZTS16DefaultOperators : !cir.ptr reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_DefaultOperators, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][3] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][4] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #false -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.zero : !rec_DefaultOperators +// CHECK-NEXT: cir.store{{.*}} %[[ZERO]], %[[ALLOCA]] : !rec_DefaultOperators, !cir.ptr // CHECK-NEXT: acc.yield // // CHECK-NEXT: } combiner { @@ -656,37 +558,8 @@ void acc_loop() { // CHECK-NEXT: acc.reduction.recipe @reduction_add__ZTSA5_16DefaultOperators : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["arrayinit.temp"] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr -// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i -// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride %[[DECAY]], %[[LAST_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr -// CHECK-NEXT: cir.do { -// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[TEMP_LOAD]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[TEMP_LOAD]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const 
#cir.int<0> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[TEMP_LOAD]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[TEMP_LOAD]][3] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[TEMP_LOAD]][4] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #false -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_B]] : !cir.bool, !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i -// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride %[[TEMP_LOAD]], %[[ONE]] : (!cir.ptr, !s64i) -> !cir.ptr -// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: cir.yield -// CHECK-NEXT: } while { -// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr, !cir.bool -// CHECK-NEXT: cir.condition(%[[CMP]]) -// CHECK-NEXT: } +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.zero : !cir.array +// CHECK-NEXT: cir.store{{.*}} %[[ZERO]], %[[ALLOCA]] : !cir.array, !cir.ptr> // CHECK-NEXT: acc.yield // // CHECK-NEXT: } combiner { @@ -754,96 +627,8 @@ void acc_loop() { // CHECK-NEXT: acc.reduction.recipe @reduction_mul__ZTSA5_16DefaultOperators : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[DECAY]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[DECAY]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[DECAY]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[DECAY]][3] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[DECAY]][4] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr -// -// CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[ONE_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: 
%[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][3] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr -// -// CHECK-NEXT: %[[TWO_IDX:.*]] = cir.const #cir.int<2> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[TWO_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][3] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr -// -// -// CHECK-NEXT: %[[THREE_IDX:.*]] = cir.const #cir.int<3> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[THREE_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][3] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: 
%[[ONE:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr -// -// CHECK-NEXT: %[[FOUR_IDX:.*]] = cir.const #cir.int<4> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[FOUR_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][3] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr -// +// CHECK-NEXT: %[[CONST:.*]] = cir.const #cir.const_array<[#cir.const_record<{#cir.int<1> : !s32i, #cir.int<1> : !u32i, #cir.fp<1{{.*}}> : !cir.float, #cir.fp<1{{.*}}> : !cir.double, #true}> : !rec_DefaultOperators, #cir.const_record<{#cir.int<1> : !s32i, #cir.int<1> : !u32i, #cir.fp<1{{.*}}> : !cir.float, #cir.fp<1{{.*}}> : !cir.double, #true}> : !rec_DefaultOperators, #cir.const_record<{#cir.int<1> : !s32i, #cir.int<1> : !u32i, #cir.fp<1{{.*}}> : !cir.float, #cir.fp<1{{.*}}> : !cir.double, #true}> : !rec_DefaultOperators, #cir.const_record<{#cir.int<1> : !s32i, #cir.int<1> : !u32i, #cir.fp<1{{.*}}> : !cir.float, #cir.fp<1{{.*}}> : !cir.double, #true}> : !rec_DefaultOperators, #cir.const_record<{#cir.int<1> : !s32i, #cir.int<1> : !u32i, #cir.fp<1{{.*}}> : !cir.float, #cir.fp<1{{.*}}> : !cir.double, #true}> : !rec_DefaultOperators]> : !cir.array +// CHECK-NEXT: cir.store{{.*}} %[[CONST]], %[[ALLOCA]] : !cir.array, !cir.ptr> // CHECK-NEXT: acc.yield // // CHECK-NEXT: } combiner { @@ -912,96 +697,8 @@ void acc_loop() { // CHECK-NEXT: acc.reduction.recipe @reduction_max__ZTSA5_16DefaultOperators : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[DECAY]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[DECAY]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<0> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[DECAY]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-3.4{{.*}}E+38> : !cir.float -// CHECK-NEXT: cir.store {{.*}} 
%[[LEAST]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[DECAY]][3] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-1.7{{.*}}E+308> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[DECAY]][4] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #false -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_B]] : !cir.bool, !cir.ptr -// -// CHECK-NEXT: %[[LEAST_IDX:.*]] = cir.const #cir.int<1> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[LEAST_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<0> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-3.4{{.*}}E+38> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][3] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-1.7{{.*}}E+308> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #false -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_B]] : !cir.bool, !cir.ptr -// -// CHECK-NEXT: %[[TWO_IDX:.*]] = cir.const #cir.int<2> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[TWO_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<0> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-3.4{{.*}}E+38> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][3] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-1.7{{.*}}E+308> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #false -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_B]] : !cir.bool, !cir.ptr -// -// -// CHECK-NEXT: %[[THREE_IDX:.*]] = cir.const #cir.int<3> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[THREE_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr -// CHECK-NEXT: %[[GET_I:.*]] = 
cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i
-// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_I]] : !s32i, !cir.ptr
-// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<0> : !u32i
-// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_U]] : !u32i, !cir.ptr
-// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-3.4{{.*}}E+38> : !cir.float
-// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_F]] : !cir.float, !cir.ptr
-// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][3] {name = "d"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-1.7{{.*}}E+308> : !cir.double
-// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_D]] : !cir.double, !cir.ptr
-// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "b"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[LEAST:.*]] = cir.const #false
-// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_B]] : !cir.bool, !cir.ptr
-//
-// CHECK-NEXT: %[[FOUR_IDX:.*]] = cir.const #cir.int<4> : !s64i
-// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[FOUR_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr
-// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i
-// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_I]] : !s32i, !cir.ptr
-// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<0> : !u32i
-// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_U]] : !u32i, !cir.ptr
-// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-3.4{{.*}}E+38> : !cir.float
-// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_F]] : !cir.float, !cir.ptr
-// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][3] {name = "d"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-1.7{{.*}}E+308> : !cir.double
-// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_D]] : !cir.double, !cir.ptr
-// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "b"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[LEAST:.*]] = cir.const #false
-// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_B]] : !cir.bool, !cir.ptr
-//
+// CHECK-NEXT: %[[CONST:.*]] = cir.const #cir.const_array<[#cir.const_record<{#cir.int<-2147483648> : !s32i, #cir.int<0> : !u32i, #cir.fp<-3.4{{.*}}E+38> : !cir.float, #cir.fp<-1.7{{.*}}E+308> : !cir.double, #false}> : !rec_DefaultOperators, #cir.const_record<{#cir.int<-2147483648> : !s32i, #cir.int<0> : !u32i, #cir.fp<-3.4{{.*}}E+38> : !cir.float, #cir.fp<-1.7{{.*}}E+308> : !cir.double, #false}> : !rec_DefaultOperators, #cir.const_record<{#cir.int<-2147483648> : !s32i, #cir.int<0> : !u32i, #cir.fp<-3.4{{.*}}E+38> : !cir.float, #cir.fp<-1.7{{.*}}E+308> : !cir.double, #false}> : !rec_DefaultOperators, #cir.const_record<{#cir.int<-2147483648> : !s32i, #cir.int<0> : !u32i, #cir.fp<-3.4{{.*}}E+38> : !cir.float, #cir.fp<-1.7{{.*}}E+308> : !cir.double, #false}> : !rec_DefaultOperators, #cir.const_record<{#cir.int<-2147483648> : !s32i, #cir.int<0> : !u32i, #cir.fp<-3.4{{.*}}E+38> : !cir.float, #cir.fp<-1.7{{.*}}E+308> : !cir.double, #false}> : !rec_DefaultOperators]> : !cir.array
+// CHECK-NEXT: cir.store{{.*}} %[[CONST]], %[[ALLOCA]] : !cir.array, !cir.ptr>
// CHECK-NEXT: acc.yield
//
// CHECK-NEXT: } combiner {
@@ -1107,96 +804,8 @@ void acc_loop() {
// CHECK-NEXT: acc.reduction.recipe @reduction_min__ZTSA5_16DefaultOperators : !cir.ptr> reduction_operator init {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr
-// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[DECAY]][0] {name = "i"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<2147483647> : !s32i
-// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_I]] : !s32i, !cir.ptr
-// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[DECAY]][1] {name = "u"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<4294967295> : !u32i
-// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_U]] : !u32i, !cir.ptr
-// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[DECAY]][2] {name = "f"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float
-// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_F]] : !cir.float, !cir.ptr
-// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[DECAY]][3] {name = "d"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<1.7{{.*}}E+308> : !cir.double
-// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_D]] : !cir.double, !cir.ptr
-// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[DECAY]][4] {name = "b"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #true
-// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_B]] : !cir.bool, !cir.ptr
-//
-// CHECK-NEXT: %[[LARGEST_IDX:.*]] = cir.const #cir.int<1> : !s64i
-// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[LARGEST_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr
-// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<2147483647> : !s32i
-// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_I]] : !s32i, !cir.ptr
-// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<4294967295> : !u32i
-// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_U]] : !u32i, !cir.ptr
-// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float
-// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_F]] : !cir.float, !cir.ptr
-// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][3] {name = "d"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<1.7{{.*}}E+308> : !cir.double
-// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_D]] : !cir.double, !cir.ptr
-// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "b"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #true
-// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_B]] : !cir.bool, !cir.ptr
-//
-// CHECK-NEXT: %[[TWO_IDX:.*]] = cir.const #cir.int<2> : !s64i
-// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[TWO_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr
-// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<2147483647> : !s32i
-// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_I]] : !s32i, !cir.ptr
-// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<4294967295> : !u32i
-// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_U]] : !u32i, !cir.ptr
-// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float
-// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_F]] : !cir.float, !cir.ptr
-// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][3] {name = "d"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<1.7{{.*}}E+308> : !cir.double
-// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_D]] : !cir.double, !cir.ptr
-// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "b"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #true
-// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_B]] : !cir.bool, !cir.ptr
-//
-//
-// CHECK-NEXT: %[[THREE_IDX:.*]] = cir.const #cir.int<3> : !s64i
-// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[THREE_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr
-// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<2147483647> : !s32i
-// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_I]] : !s32i, !cir.ptr
-// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<4294967295> : !u32i
-// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_U]] : !u32i, !cir.ptr
-// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float
-// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_F]] : !cir.float, !cir.ptr
-// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][3] {name = "d"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<1.7{{.*}}E+308> : !cir.double
-// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_D]] : !cir.double, !cir.ptr
-// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "b"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #true
-// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_B]] : !cir.bool, !cir.ptr
-//
-// CHECK-NEXT: %[[FOUR_IDX:.*]] = cir.const #cir.int<4> : !s64i
-// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[FOUR_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr
-// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<2147483647> : !s32i
-// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_I]] : !s32i, !cir.ptr
-// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<4294967295> : !u32i
-// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_U]] : !u32i, !cir.ptr
-// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float
-// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_F]] : !cir.float, !cir.ptr
-// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][3] {name = "d"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<1.7{{.*}}E+308> : !cir.double
-// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_D]] : !cir.double, !cir.ptr
-// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "b"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #true
-// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_B]] : !cir.bool, !cir.ptr
-//
+// CHECK-NEXT: %[[CONST:.*]] = cir.const #cir.const_array<[#cir.const_record<{#cir.int<2147483647> : !s32i, #cir.int<4294967295> : !u32i, #cir.fp<3.4{{.*}}E+38> : !cir.float, #cir.fp<1.7{{.*}}E+308> : !cir.double, #true}> : !rec_DefaultOperators, #cir.const_record<{#cir.int<2147483647> : !s32i, #cir.int<4294967295> : !u32i, #cir.fp<3.4{{.*}}E+38> : !cir.float, #cir.fp<1.7{{.*}}E+308> : !cir.double, #true}> : !rec_DefaultOperators, #cir.const_record<{#cir.int<2147483647> : !s32i, #cir.int<4294967295> : !u32i, #cir.fp<3.4{{.*}}E+38> : !cir.float, #cir.fp<1.7{{.*}}E+308> : !cir.double, #true}> : !rec_DefaultOperators, #cir.const_record<{#cir.int<2147483647> : !s32i, #cir.int<4294967295> : !u32i, #cir.fp<3.4{{.*}}E+38> : !cir.float, #cir.fp<1.7{{.*}}E+308> : !cir.double, #true}> : !rec_DefaultOperators, #cir.const_record<{#cir.int<2147483647> : !s32i, #cir.int<4294967295> : !u32i, #cir.fp<3.4{{.*}}E+38> : !cir.float, #cir.fp<1.7{{.*}}E+308> : !cir.double, #true}> : !rec_DefaultOperators]> : !cir.array
+// CHECK-NEXT: cir.store{{.*}} %[[CONST]], %[[ALLOCA]] : !cir.array, !cir.ptr>
// CHECK-NEXT: acc.yield
//
// CHECK-NEXT: } combiner {
@@ -1302,66 +911,8 @@ void acc_loop() {
// CHECK-NEXT: acc.reduction.recipe @reduction_iand__ZTSA5_24DefaultOperatorsNoFloats : !cir.ptr> reduction_operator init {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr
-// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[DECAY]][0] {name = "i"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i
-// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_I]] : !s32i, !cir.ptr
-// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[DECAY]][1] {name = "u"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<4294967295> : !u32i
-// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_U]] : !u32i, !cir.ptr
-// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[DECAY]][2] {name = "b"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #true
-// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_B]] : !cir.bool, !cir.ptr
-//
-// CHECK-NEXT: %[[ALL_ONES_IDX:.*]] = cir.const #cir.int<1> : !s64i
-// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[ALL_ONES_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr
-// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i
-// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_I]] : !s32i, !cir.ptr
-// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<4294967295> : !u32i
-// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_U]] : !u32i, !cir.ptr
-// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "b"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #true
-// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_B]] : !cir.bool, !cir.ptr
-//
-// CHECK-NEXT: %[[TWO_IDX:.*]] = cir.const #cir.int<2> : !s64i
-// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[TWO_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr
-// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i
-// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_I]] : !s32i, !cir.ptr
-// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<4294967295> : !u32i
-// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_U]] : !u32i, !cir.ptr
-// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "b"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #true
-// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_B]] : !cir.bool, !cir.ptr
-//
-//
-// CHECK-NEXT: %[[THREE_IDX:.*]] = cir.const #cir.int<3> : !s64i
-// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[THREE_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr
-// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i
-// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_I]] : !s32i, !cir.ptr
-// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<4294967295> : !u32i
-// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_U]] : !u32i, !cir.ptr
-// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "b"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #true
-// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_B]] : !cir.bool, !cir.ptr
-//
-// CHECK-NEXT: %[[FOUR_IDX:.*]] = cir.const #cir.int<4> : !s64i
-// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[FOUR_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr
-// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i
-// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_I]] : !s32i, !cir.ptr
-// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<4294967295> : !u32i
-// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_U]] : !u32i, !cir.ptr
-// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "b"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #true
-// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_B]] : !cir.bool, !cir.ptr
-//
+// CHECK-NEXT: %[[CONST:.*]] = cir.const #cir.const_array<[#cir.const_record<{#cir.int<-1> : !s32i, #cir.int<4294967295> : !u32i, #true}> : !rec_DefaultOperatorsNoFloats, #cir.const_record<{#cir.int<-1> : !s32i, #cir.int<4294967295> : !u32i, #true}> : !rec_DefaultOperatorsNoFloats, #cir.const_record<{#cir.int<-1> : !s32i, #cir.int<4294967295> : !u32i, #true}> : !rec_DefaultOperatorsNoFloats, #cir.const_record<{#cir.int<-1> : !s32i, #cir.int<4294967295> : !u32i, #true}> : !rec_DefaultOperatorsNoFloats, #cir.const_record<{#cir.int<-1> : !s32i, #cir.int<4294967295> : !u32i, #true}> : !rec_DefaultOperatorsNoFloats]> : !cir.array
+// CHECK-NEXT: cir.store{{.*}} %[[CONST]], %[[ALLOCA]] : !cir.array, !cir.ptr>
// CHECK-NEXT: acc.yield
//
// CHECK-NEXT: } combiner {
@@ -1417,32 +968,8 @@ void acc_loop() {
// CHECK-NEXT: acc.reduction.recipe @reduction_ior__ZTSA5_24DefaultOperatorsNoFloats : !cir.ptr> reduction_operator init {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init]
-// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["arrayinit.temp"]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr
-// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr>
-// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
-// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride %[[DECAY]], %[[LAST_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr
-// CHECK-NEXT: cir.do {
-// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr
-// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[TEMP_LOAD]][0] {name = "i"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i
-// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr
-// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[TEMP_LOAD]][1] {name = "u"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u32i
-// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_U]] : !u32i, !cir.ptr
-// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[TEMP_LOAD]][2] {name = "b"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ZERO:.*]] = cir.const #false
-// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_B]] : !cir.bool, !cir.ptr
-//
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i
-// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride %[[TEMP_LOAD]], %[[ONE]] : (!cir.ptr, !s64i) -> !cir.ptr
-// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr>
-// CHECK-NEXT: cir.yield
-// CHECK-NEXT: } while {
-// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr
-// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr, !cir.bool
-// CHECK-NEXT: cir.condition(%[[CMP]])
-// CHECK-NEXT: }
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.zero : !cir.array
+// CHECK-NEXT: cir.store{{.*}} %[[ZERO]], %[[ALLOCA]] : !cir.array, !cir.ptr>
// CHECK-NEXT: acc.yield
//
// CHECK-NEXT: } combiner {
@@ -1498,31 +1025,8 @@ void acc_loop() {
// CHECK-NEXT: acc.reduction.recipe @reduction_xor__ZTSA5_24DefaultOperatorsNoFloats : !cir.ptr> reduction_operator init {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init]
-// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["arrayinit.temp"]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr
-// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr>
-// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
-// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride %[[DECAY]], %[[LAST_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr
-// CHECK-NEXT: cir.do {
-// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr
-// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[TEMP_LOAD]][0] {name = "i"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i
-// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr
-// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[TEMP_LOAD]][1] {name = "u"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u32i
-// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_U]] : !u32i, !cir.ptr
-// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[TEMP_LOAD]][2] {name = "b"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ZERO:.*]] = cir.const #false
-// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_B]] : !cir.bool, !cir.ptr
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i
-// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride %[[TEMP_LOAD]], %[[ONE]] : (!cir.ptr, !s64i) -> !cir.ptr
-// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr>
-// CHECK-NEXT: cir.yield
-// CHECK-NEXT: } while {
-// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr
-// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr, !cir.bool
-// CHECK-NEXT: cir.condition(%[[CMP]])
-// CHECK-NEXT: }
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.zero : !cir.array
+// CHECK-NEXT: cir.store{{.*}} %[[ZERO]], %[[ALLOCA]] : !cir.array, !cir.ptr>
// CHECK-NEXT: acc.yield
//
// CHECK-NEXT: } combiner {
@@ -1578,96 +1082,8 @@ void acc_loop() {
// CHECK-NEXT: acc.reduction.recipe @reduction_land__ZTSA5_16DefaultOperators : !cir.ptr> reduction_operator init {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr
-// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[DECAY]][0] {name = "i"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
-// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr
-// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[DECAY]][1] {name = "u"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i
-// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr
-// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[DECAY]][2] {name = "f"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float
-// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr
-// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[DECAY]][3] {name = "d"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double
-// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr
-// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[DECAY]][4] {name = "b"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #true
-// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr
-//
-// CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i
-// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[ONE_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr
-// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
-// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr
-// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i
-// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr
-// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float
-// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr
-// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][3] {name = "d"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double
-// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr
-// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "b"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #true
-// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr
-//
-// CHECK-NEXT: %[[TWO_IDX:.*]] = cir.const #cir.int<2> : !s64i
-// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[TWO_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr
-// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
-// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr
-// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i
-// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr
-// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float
-// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr
-// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][3] {name = "d"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double
-// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr
-// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "b"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #true
-// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr
-//
-//
-// CHECK-NEXT: %[[THREE_IDX:.*]] = cir.const #cir.int<3> : !s64i
-// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[THREE_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr
-// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
-// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr
-// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i
-// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr
-// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float
-// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr
-// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][3] {name = "d"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double
-// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr
-// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "b"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #true
-// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr
-//
-// CHECK-NEXT: %[[FOUR_IDX:.*]] = cir.const #cir.int<4> : !s64i
-// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[FOUR_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr
-// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
-// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr
-// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i
-// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr
-// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float
-// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr
-// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][3] {name = "d"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double
-// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr
-// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "b"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #true
-// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr
-//
+// CHECK-NEXT: %[[CONST:.*]] = cir.const #cir.const_array<[#cir.const_record<{#cir.int<1> : !s32i, #cir.int<1> : !u32i, #cir.fp<1{{.*}}> : !cir.float, #cir.fp<1{{.*}}> : !cir.double, #true}> : !rec_DefaultOperators, #cir.const_record<{#cir.int<1> : !s32i, #cir.int<1> : !u32i, #cir.fp<1{{.*}}> : !cir.float, #cir.fp<1{{.*}}> : !cir.double, #true}> : !rec_DefaultOperators, #cir.const_record<{#cir.int<1> : !s32i, #cir.int<1> : !u32i, #cir.fp<1{{.*}}> : !cir.float, #cir.fp<1{{.*}}> : !cir.double, #true}> : !rec_DefaultOperators, #cir.const_record<{#cir.int<1> : !s32i, #cir.int<1> : !u32i, #cir.fp<1{{.*}}> : !cir.float, #cir.fp<1{{.*}}> : !cir.double, #true}> : !rec_DefaultOperators, #cir.const_record<{#cir.int<1> : !s32i, #cir.int<1> : !u32i, #cir.fp<1{{.*}}> : !cir.float, #cir.fp<1{{.*}}> : !cir.double, #true}> : !rec_DefaultOperators]> : !cir.array
+// CHECK-NEXT: cir.store{{.*}} %[[CONST]], %[[ALLOCA]] : !cir.array, !cir.ptr>
// CHECK-NEXT: acc.yield
//
// CHECK-NEXT: } combiner {
@@ -1774,38 +1190,8 @@ void acc_loop() {
// CHECK: acc.reduction.recipe @reduction_lor__ZTSA5_16DefaultOperators : !cir.ptr> reduction_operator init {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init]
-// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["arrayinit.temp"]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr
-// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr>
-// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
-// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride %[[DECAY]], %[[LAST_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr
-// CHECK-NEXT: cir.do {
-// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr
-// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[TEMP_LOAD]][0] {name = "i"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i
-// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr
-// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[TEMP_LOAD]][1] {name = "u"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u32i
-// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_U]] : !u32i, !cir.ptr
-// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[TEMP_LOAD]][2] {name = "f"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float
-// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr
-// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[TEMP_LOAD]][3] {name = "d"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double
-// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr
-// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[TEMP_LOAD]][4] {name = "b"} : !cir.ptr -> !cir.ptr
-// CHECK-NEXT: %[[ZERO:.*]] = cir.const #false
-// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_B]] : !cir.bool, !cir.ptr
-//
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i
-// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride %[[TEMP_LOAD]], %[[ONE]] : (!cir.ptr, !s64i) -> !cir.ptr
-// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr>
-// CHECK-NEXT: cir.yield
-// CHECK-NEXT: } while {
-// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr
-// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr, !cir.bool
-// CHECK-NEXT: cir.condition(%[[CMP]])
-// CHECK-NEXT: }
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.zero : !cir.array
+// CHECK-NEXT: cir.store{{.*}} %[[ZERO]], %[[ALLOCA]] : !cir.array, !cir.ptr>
// CHECK-NEXT: acc.yield
//
// CHECK-NEXT: } combiner {
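[Annotation] Every init recipe in the hunks above follows the same rewrite: the per-element cir.get_member/cir.store sequences (and, for +, |, ^, and ||, the cir.do/while fill loop) collapse into a single cir.const of the whole array followed by one cir.store into the alloca. As a rough sketch of the kind of source these recipes are generated from — the field layout matches the !rec_DefaultOperators record named in the checks, but this reduced harness is hypothetical, not the literal test file:

struct DefaultOperators {
  int i;
  unsigned u;
  float f;
  double d;
  bool b;
};

template <typename T>
void acc_loop() {
  T someVarArr[5];
  // reduction(max:...) seeds each element with the type's minimum values;
  // the new codegen materializes that as one constant-array store.
#pragma acc loop reduction(max:someVarArr)
  for (int i = 0; i < 5; ++i)
    ;
}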
diff --git a/clang/test/CIR/CodeGenOpenACC/loop-reduction-clause-float.cpp b/clang/test/CIR/CodeGenOpenACC/loop-reduction-clause-float.cpp
index 57cc1afec2911..6e5af5c3ae322 100644
--- a/clang/test/CIR/CodeGenOpenACC/loop-reduction-clause-float.cpp
+++ b/clang/test/CIR/CodeGenOpenACC/loop-reduction-clause-float.cpp
@@ -1,4 +1,5 @@
-// RUN: %clang_cc1 -fopenacc -triple x86_64-linux-gnu -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -triple x86_64-linux-pc %s -o - | FileCheck %s
+// RUN: %clang_cc1 -fopenacc -triple x86_64-linux-gnu -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -triple x86_64-linux-pc %s -o %t.cir
+// RUN: FileCheck --input-file=%t.cir %s

template
void acc_loop() {
@@ -138,24 +139,8 @@ void acc_loop() {
// CHECK-NEXT: acc.reduction.recipe @reduction_add__ZTSA5_f : !cir.ptr> reduction_operator init {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init]
-// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["arrayinit.temp"]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr
-// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr>
-// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
-// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride %[[DECAY]], %[[LAST_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr
-// CHECK-NEXT: cir.do {
-// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr
-// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float
-// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[TEMP_LOAD]] : !cir.float, !cir.ptr
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i
-// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride %[[TEMP_LOAD]], %[[ONE]] : (!cir.ptr, !s64i) -> !cir.ptr
-// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr>
-// CHECK-NEXT: cir.yield
-// CHECK-NEXT: } while {
-// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr
-// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr, !cir.bool
-// CHECK-NEXT: cir.condition(%[[CMP]])
-// CHECK-NEXT: }
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.zero : !cir.array
+// CHECK-NEXT: cir.store{{.*}} %[[ZERO]], %[[ALLOCA]] : !cir.array, !cir.ptr>
// CHECK-NEXT: acc.yield
//
// CHECK-NEXT: } combiner {
@@ -192,25 +177,8 @@ void acc_loop() {
// CHECK-NEXT: acc.reduction.recipe @reduction_mul__ZTSA5_f : !cir.ptr> reduction_operator init {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float
-// CHECK-NEXT: cir.store{{.*}} %[[ONE]], %[[DECAY]] : !cir.float, !cir.ptr
-// CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i
-// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[ONE_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float
-// CHECK-NEXT: cir.store{{.*}} %[[ONE]], %[[NEXT_ELT]] : !cir.float, !cir.ptr
-// CHECK-NEXT: %[[TWO_IDX:.*]] = cir.const #cir.int<2> : !s64i
-// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[TWO_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float
-// CHECK-NEXT: cir.store{{.*}} %[[ONE]], %[[NEXT_ELT]] : !cir.float, !cir.ptr
-// CHECK-NEXT: %[[THREE_IDX:.*]] = cir.const #cir.int<3> : !s64i
-// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[THREE_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float
-// CHECK-NEXT: cir.store{{.*}} %[[ONE]], %[[NEXT_ELT]] : !cir.float, !cir.ptr
-// CHECK-NEXT: %[[FOUR_IDX:.*]] = cir.const #cir.int<4> : !s64i
-// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[FOUR_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float
-// CHECK-NEXT: cir.store{{.*}} %[[ONE]], %[[NEXT_ELT]] : !cir.float, !cir.ptr
+// CHECK-NEXT: %[[CONST_ARRAY:.*]] = cir.const #cir.const_array<[#cir.fp<1{{.*}}> : !cir.float, #cir.fp<1{{.*}}> : !cir.float, #cir.fp<1{{.*}}> : !cir.float, #cir.fp<1{{.*}}> : !cir.float, #cir.fp<1{{.*}}> : !cir.float]> : !cir.array
+// CHECK-NEXT: cir.store{{.*}} %[[CONST_ARRAY]], %[[ALLOCA]] : !cir.array, !cir.ptr>
// CHECK-NEXT: acc.yield
//
// CHECK-NEXT: } combiner {
@@ -247,25 +215,8 @@ void acc_loop() {
// CHECK-NEXT: acc.reduction.recipe @reduction_max__ZTSA5_f : !cir.ptr> reduction_operator init {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr
-// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-3.4{{.*}}E+38> : !cir.float
-// CHECK-NEXT: cir.store{{.*}} %[[LEAST]], %[[DECAY]] : !cir.float, !cir.ptr
-// CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i
-// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[ONE_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr
-// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-3.4{{.*}}E+38> : !cir.float
-// CHECK-NEXT: cir.store{{.*}} %[[LEAST]], %[[NEXT_ELT]] : !cir.float, !cir.ptr
-// CHECK-NEXT: %[[TWO_IDX:.*]] = cir.const #cir.int<2> : !s64i
-// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[TWO_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr
-// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-3.4{{.*}}E+38> : !cir.float
-// CHECK-NEXT: cir.store{{.*}} %[[LEAST]], %[[NEXT_ELT]] : !cir.float, !cir.ptr
-// CHECK-NEXT: %[[THREE_IDX:.*]] = cir.const #cir.int<3> : !s64i
-// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[THREE_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr
-// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-3.4{{.*}}E+38> : !cir.float
-// CHECK-NEXT: cir.store{{.*}} %[[LEAST]], %[[NEXT_ELT]] : !cir.float, !cir.ptr
-// CHECK-NEXT: %[[FOUR_IDX:.*]] = cir.const #cir.int<4> : !s64i
-// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[FOUR_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr
-// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-3.4{{.*}}E+38> : !cir.float
-// CHECK-NEXT: cir.store{{.*}} %[[LEAST]], %[[NEXT_ELT]] : !cir.float, !cir.ptr
+// CHECK-NEXT: %[[CONST_ARRAY:.*]] = cir.const #cir.const_array<[#cir.fp<-3.4{{.*}}E+38> : !cir.float, #cir.fp<-3.4{{.*}}E+38> : !cir.float, #cir.fp<-3.4{{.*}}E+38> : !cir.float, #cir.fp<-3.4{{.*}}E+38> : !cir.float, #cir.fp<-3.4{{.*}}E+38> : !cir.float]> : !cir.array
+// CHECK-NEXT: cir.store{{.*}} %[[CONST_ARRAY]], %[[ALLOCA]] : !cir.array, !cir.ptr>
// CHECK-NEXT: acc.yield
//
// CHECK-NEXT: } combiner {
@@ -309,25 +260,8 @@ void acc_loop() {
// CHECK-NEXT: acc.reduction.recipe @reduction_min__ZTSA5_f : !cir.ptr> reduction_operator init {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr
-// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float
-// CHECK-NEXT: cir.store{{.*}} %[[LARGEST]], %[[DECAY]] : !cir.float, !cir.ptr
-// CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i
-// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[ONE_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr
-// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float
-// CHECK-NEXT: cir.store{{.*}} %[[LARGEST]], %[[NEXT_ELT]] : !cir.float, !cir.ptr
-// CHECK-NEXT: %[[TWO_IDX:.*]] = cir.const #cir.int<2> : !s64i
-// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[TWO_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr
-// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float
-// CHECK-NEXT: cir.store{{.*}} %[[LARGEST]], %[[NEXT_ELT]] : !cir.float, !cir.ptr
-// CHECK-NEXT: %[[THREE_IDX:.*]] = cir.const #cir.int<3> : !s64i
-// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[THREE_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr
-// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float
-// CHECK-NEXT: cir.store{{.*}} %[[LARGEST]], %[[NEXT_ELT]] : !cir.float, !cir.ptr
-// CHECK-NEXT: %[[FOUR_IDX:.*]] = cir.const #cir.int<4> : !s64i
-// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[FOUR_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr
-// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float
-// CHECK-NEXT: cir.store{{.*}} %[[LARGEST]], %[[NEXT_ELT]] : !cir.float, !cir.ptr
+// CHECK-NEXT: %[[CONST_ARRAY:.*]] = cir.const #cir.const_array<[#cir.fp<3.4{{.*}}E+38> : !cir.float, #cir.fp<3.4{{.*}}E+38> : !cir.float, #cir.fp<3.4{{.*}}E+38> : !cir.float, #cir.fp<3.4{{.*}}E+38> : !cir.float, #cir.fp<3.4{{.*}}E+38> : !cir.float]> : !cir.array
+// CHECK-NEXT: cir.store{{.*}} %[[CONST_ARRAY]], %[[ALLOCA]] : !cir.array, !cir.ptr>
// CHECK-NEXT: acc.yield
//
// CHECK-NEXT: } combiner {
@@ -371,25 +305,8 @@ void acc_loop() {
// CHECK-NEXT: acc.reduction.recipe @reduction_land__ZTSA5_f : !cir.ptr> reduction_operator init {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float
-// CHECK-NEXT: cir.store{{.*}} %[[ONE]], %[[DECAY]] : !cir.float, !cir.ptr
-// CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i
-// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[ONE_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float
-// CHECK-NEXT: cir.store{{.*}} %[[ONE]], %[[NEXT_ELT]] : !cir.float, !cir.ptr
-// CHECK-NEXT: %[[TWO_IDX:.*]] = cir.const #cir.int<2> : !s64i
-// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[TWO_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float
-// CHECK-NEXT: cir.store{{.*}} %[[ONE]], %[[NEXT_ELT]] : !cir.float, !cir.ptr
-// CHECK-NEXT: %[[THREE_IDX:.*]] = cir.const #cir.int<3> : !s64i
-// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[THREE_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float
-// CHECK-NEXT: cir.store{{.*}} %[[ONE]], %[[NEXT_ELT]] : !cir.float, !cir.ptr
-// CHECK-NEXT: %[[FOUR_IDX:.*]] = cir.const #cir.int<4> : !s64i
-// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[FOUR_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float
-// CHECK-NEXT: cir.store{{.*}} %[[ONE]], %[[NEXT_ELT]] : !cir.float, !cir.ptr
+// CHECK-NEXT: %[[CONST_ARRAY:.*]] = cir.const #cir.const_array<[#cir.fp<1{{.*}}> : !cir.float, #cir.fp<1{{.*}}> : !cir.float, #cir.fp<1{{.*}}> : !cir.float, #cir.fp<1{{.*}}> : !cir.float, #cir.fp<1{{.*}}> : !cir.float]> : !cir.array
+// CHECK-NEXT: cir.store{{.*}} %[[CONST_ARRAY]], %[[ALLOCA]] : !cir.array, !cir.ptr>
// CHECK-NEXT: acc.yield
//
// CHECK-NEXT: } combiner {
@@ -436,24 +353,8 @@ void acc_loop() {
// CHECK-NEXT: acc.reduction.recipe @reduction_lor__ZTSA5_f : !cir.ptr> reduction_operator init {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init]
-// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["arrayinit.temp"]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr
-// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr>
-// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
-// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride %[[DECAY]], %[[LAST_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr
-// CHECK-NEXT: cir.do {
-// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr
-// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float
-// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[TEMP_LOAD]] : !cir.float, !cir.ptr
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i
-// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride %[[TEMP_LOAD]], %[[ONE]] : (!cir.ptr, !s64i) -> !cir.ptr
-// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr>
-// CHECK-NEXT: cir.yield
-// CHECK-NEXT: } while {
-// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr
-// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr, !cir.bool
-// CHECK-NEXT: cir.condition(%[[CMP]])
-// CHECK-NEXT: }
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.zero : !cir.array
+// CHECK-NEXT: cir.store{{.*}} %[[ZERO]], %[[ALLOCA]] : !cir.array, !cir.ptr>
// CHECK-NEXT: acc.yield
//
// CHECK-NEXT: } combiner {
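[Annotation] The integer variants below use the same identity values as the float file: +, |, ^, and || start at zero (now a single #cir.zero store), * and && start at one, & at all-ones, max at INT_MIN, and min at INT_MAX. A hypothetical helper (not part of the patch) summarizing the per-element start value each integer init recipe stores; the op tokens are illustrative stand-ins for the OpenACC reduction operators:

#include <climits>

// Start values mirrored by the reduction init recipes for int[5];
// 'M'/'m' stand in for reduction(max:...) and reduction(min:...).
constexpr int reductionInitValue(char op) {
  switch (op) {
  case '+': case '|': case '^': return 0;       // also ||; lowered as #cir.zero
  case '*':                     return 1;       // also &&; constant array of 1s
  case '&':                     return -1;      // all bits set
  case 'M':                     return INT_MIN; // max starts at the least value
  case 'm':                     return INT_MAX; // min starts at the greatest value
  default:                      return 0;
  }
}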
diff --git a/clang/test/CIR/CodeGenOpenACC/loop-reduction-clause-int.cpp b/clang/test/CIR/CodeGenOpenACC/loop-reduction-clause-int.cpp
index f60dff9385412..8baf77966efc1 100644
--- a/clang/test/CIR/CodeGenOpenACC/loop-reduction-clause-int.cpp
+++ b/clang/test/CIR/CodeGenOpenACC/loop-reduction-clause-int.cpp
@@ -1,4 +1,5 @@
-// RUN: %clang_cc1 -fopenacc -triple x86_64-linux-gnu -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -triple x86_64-linux-pc %s -o - | FileCheck %s
+// RUN: %clang_cc1 -fopenacc -triple x86_64-linux-gnu -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -triple x86_64-linux-pc %s -o %t.cir
+// RUN: FileCheck --input-file=%t.cir %s

template
void acc_loop() {
@@ -41,7 +42,7 @@ void acc_loop() {
#pragma acc loop reduction(max:someVar)
// CHECK-NEXT: acc.reduction.recipe @reduction_max__ZTSi : !cir.ptr reduction_operator init {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}})
-// CHECK-NEXT: cir.alloca !s32i, !cir.ptr, ["openacc.reduction.init", init]
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !s32i, !cir.ptr, ["openacc.reduction.init", init]
// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i
// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[ALLOCA]] : !s32i, !cir.ptr
// CHECK-NEXT: acc.yield
@@ -64,7 +65,7 @@ void acc_loop() {
#pragma acc loop reduction(min:someVar)
// CHECK-NEXT: acc.reduction.recipe @reduction_min__ZTSi : !cir.ptr reduction_operator init {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}})
-// CHECK-NEXT: cir.alloca !s32i, !cir.ptr, ["openacc.reduction.init", init]
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !s32i, !cir.ptr, ["openacc.reduction.init", init]
// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<2147483647> : !s32i
// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[ALLOCA]] : !s32i, !cir.ptr
// CHECK-NEXT: acc.yield
@@ -87,7 +88,7 @@ void acc_loop() {
#pragma acc loop reduction(&:someVar)
// CHECK-NEXT: acc.reduction.recipe @reduction_iand__ZTSi : !cir.ptr reduction_operator init {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}})
-// CHECK-NEXT: cir.alloca !s32i, !cir.ptr, ["openacc.reduction.init", init]
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !s32i, !cir.ptr, ["openacc.reduction.init", init]
// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i
// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[ALLOCA]] : !s32i, !cir.ptr
// CHECK-NEXT: acc.yield
@@ -190,24 +191,8 @@ void acc_loop() {
// CHECK-NEXT: acc.reduction.recipe @reduction_add__ZTSA5_i : !cir.ptr> reduction_operator init {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init]
-// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["arrayinit.temp"]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr
-// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr>
-// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
-// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride %[[DECAY]], %[[LAST_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr
-// CHECK-NEXT: cir.do {
-// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr
-// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i
-// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[TEMP_LOAD]] : !s32i, !cir.ptr
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i
-// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride %[[TEMP_LOAD]], %[[ONE]] : (!cir.ptr, !s64i) -> !cir.ptr
-// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr>
-// CHECK-NEXT: cir.yield
-// CHECK-NEXT: } while {
-// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr
-// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr, !cir.bool
-// CHECK-NEXT: cir.condition(%[[CMP]])
-// CHECK-NEXT: }
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.zero : !cir.array
+// CHECK-NEXT: cir.store{{.*}} %[[ZERO]], %[[ALLOCA]] : !cir.array, !cir.ptr>
// CHECK-NEXT: acc.yield
//
// CHECK-NEXT: } combiner {
@@ -244,25 +229,8 @@ void acc_loop() {
// CHECK-NEXT: acc.reduction.recipe @reduction_mul__ZTSA5_i : !cir.ptr> reduction_operator init {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
-// CHECK-NEXT: cir.store{{.*}} %[[ONE]], %[[DECAY]] : !s32i, !cir.ptr
-// CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i
-// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[ONE_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
-// CHECK-NEXT: cir.store{{.*}} %[[ONE]], %[[NEXT_ELT]] : !s32i, !cir.ptr
-// CHECK-NEXT: %[[TWO_IDX:.*]] = cir.const #cir.int<2> : !s64i
-// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[TWO_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
-// CHECK-NEXT: cir.store{{.*}} %[[ONE]], %[[NEXT_ELT]] : !s32i, !cir.ptr
-// CHECK-NEXT: %[[THREE_IDX:.*]] = cir.const #cir.int<3> : !s64i
-// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[THREE_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
-// CHECK-NEXT: cir.store{{.*}} %[[ONE]], %[[NEXT_ELT]] : !s32i, !cir.ptr
-// CHECK-NEXT: %[[FOUR_IDX:.*]] = cir.const #cir.int<4> : !s64i
-// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[FOUR_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
-// CHECK-NEXT: cir.store{{.*}} %[[ONE]], %[[NEXT_ELT]] : !s32i, !cir.ptr
+// CHECK-NEXT: %[[CONST_ARRAY:.*]] = cir.const #cir.const_array<[#cir.int<1> : !s32i, #cir.int<1> : !s32i, #cir.int<1> : !s32i, #cir.int<1> : !s32i, #cir.int<1> : !s32i]> : !cir.array
+// CHECK-NEXT: cir.store{{.*}} %[[CONST_ARRAY]], %[[ALLOCA]] : !cir.array, !cir.ptr>
// CHECK-NEXT: acc.yield
//
// CHECK-NEXT: } combiner {
@@ -298,26 +266,9 @@ void acc_loop() {
#pragma acc loop reduction(max:someVarArr)
// CHECK-NEXT: acc.reduction.recipe @reduction_max__ZTSA5_i : !cir.ptr> reduction_operator init {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}})
-// CHECK-NEXT: cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr
-// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i
-// CHECK-NEXT: cir.store{{.*}} %[[LEAST]], %[[DECAY]] : !s32i, !cir.ptr
-// CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i
-// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[ONE_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr
-// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i
-// CHECK-NEXT: cir.store{{.*}} %[[LEAST]], %[[NEXT_ELT]] : !s32i, !cir.ptr
-// CHECK-NEXT: %[[TWO_IDX:.*]] = cir.const #cir.int<2> : !s64i
-// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[TWO_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr
-// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i
-// CHECK-NEXT: cir.store{{.*}} %[[LEAST]], %[[NEXT_ELT]] : !s32i, !cir.ptr
-// CHECK-NEXT: %[[THREE_IDX:.*]] = cir.const #cir.int<3> : !s64i
-// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[THREE_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr
-// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i
-// CHECK-NEXT: cir.store{{.*}} %[[LEAST]], %[[NEXT_ELT]] : !s32i, !cir.ptr
-// CHECK-NEXT: %[[FOUR_IDX:.*]] = cir.const #cir.int<4> : !s64i
-// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[FOUR_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr
-// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i
-// CHECK-NEXT: cir.store{{.*}} %[[LEAST]], %[[NEXT_ELT]] : !s32i, !cir.ptr
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init]
+// CHECK-NEXT: %[[CONST_ARRAY:.*]] = cir.const #cir.const_array<[#cir.int<-2147483648> : !s32i, #cir.int<-2147483648> : !s32i, #cir.int<-2147483648> : !s32i, #cir.int<-2147483648> : !s32i, #cir.int<-2147483648> : !s32i]> : !cir.array
+// CHECK-NEXT: cir.store{{.*}} %[[CONST_ARRAY]], %[[ALLOCA]] : !cir.array, !cir.ptr>
// CHECK-NEXT: acc.yield
//
// CHECK-NEXT: } combiner {
@@ -360,26 +311,9 @@ void acc_loop() {
#pragma acc loop reduction(min:someVarArr)
// CHECK-NEXT: acc.reduction.recipe @reduction_min__ZTSA5_i : !cir.ptr> reduction_operator init {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}})
-// CHECK-NEXT: cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr
-// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<2147483647> : !s32i
-// CHECK-NEXT: cir.store{{.*}} %[[LARGEST]], %[[DECAY]] : !s32i, !cir.ptr
-// CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i
-// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[ONE_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr
-// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<2147483647> : !s32i
-// CHECK-NEXT: cir.store{{.*}} %[[LARGEST]], %[[NEXT_ELT]] : !s32i, !cir.ptr
-// CHECK-NEXT: %[[TWO_IDX:.*]] = cir.const #cir.int<2> : !s64i
-// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[TWO_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr
-// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<2147483647> : !s32i
-// CHECK-NEXT: cir.store{{.*}} %[[LARGEST]], %[[NEXT_ELT]] : !s32i, !cir.ptr
-// CHECK-NEXT: %[[THREE_IDX:.*]] = cir.const #cir.int<3> : !s64i
-// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[THREE_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr
-// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<2147483647> : !s32i
-// CHECK-NEXT: cir.store{{.*}} %[[LARGEST]], %[[NEXT_ELT]] : !s32i, !cir.ptr
-// CHECK-NEXT: %[[FOUR_IDX:.*]] = cir.const #cir.int<4> : !s64i
-// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[FOUR_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr
-// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<2147483647> : !s32i
-// CHECK-NEXT: cir.store{{.*}} %[[LARGEST]], %[[NEXT_ELT]] : !s32i, !cir.ptr
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init]
+// CHECK-NEXT: %[[CONST_ARRAY:.*]] = cir.const #cir.const_array<[#cir.int<2147483647> : !s32i, #cir.int<2147483647> : !s32i, #cir.int<2147483647> : !s32i, #cir.int<2147483647> : !s32i, #cir.int<2147483647> : !s32i]> : !cir.array
+// CHECK-NEXT: cir.store{{.*}} %[[CONST_ARRAY]], %[[ALLOCA]] : !cir.array, !cir.ptr>
// CHECK-NEXT: acc.yield
//
// CHECK-NEXT: } combiner {
@@ -422,26 +356,9 @@ void acc_loop() {
#pragma acc loop reduction(&:someVarArr)
// CHECK-NEXT: acc.reduction.recipe @reduction_iand__ZTSA5_i : !cir.ptr> reduction_operator init {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}})
-// CHECK-NEXT: cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr
-// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i
-// CHECK-NEXT: cir.store{{.*}} %[[ALL_ONES]], %[[DECAY]] : !s32i, !cir.ptr
-// CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i
-// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[ONE_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr
-// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i
-// CHECK-NEXT: cir.store{{.*}} %[[ALL_ONES]], %[[NEXT_ELT]] : !s32i, !cir.ptr
-// CHECK-NEXT: %[[TWO_IDX:.*]] = cir.const #cir.int<2> : !s64i
-// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[TWO_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr
-// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i
-// CHECK-NEXT: cir.store{{.*}} %[[ALL_ONES]], %[[NEXT_ELT]] : !s32i, !cir.ptr
-// CHECK-NEXT: %[[THREE_IDX:.*]] = cir.const #cir.int<3> : !s64i
-// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[THREE_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr
-// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i
-// CHECK-NEXT: cir.store{{.*}} %[[ALL_ONES]], %[[NEXT_ELT]] : !s32i, !cir.ptr
-// CHECK-NEXT: %[[FOUR_IDX:.*]] = cir.const #cir.int<4> : !s64i
-// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[FOUR_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr
-// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i
-// CHECK-NEXT: cir.store{{.*}} %[[ALL_ONES]], %[[NEXT_ELT]] : !s32i, !cir.ptr
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init]
+// CHECK-NEXT: %[[CONST_ARRAY:.*]] = cir.const #cir.const_array<[#cir.int<-1> : !s32i, #cir.int<-1> : !s32i, #cir.int<-1> : !s32i, #cir.int<-1> : !s32i, #cir.int<-1> : !s32i]> : !cir.array
+// CHECK-NEXT: cir.store{{.*}} %[[CONST_ARRAY]], %[[ALLOCA]] : !cir.array, !cir.ptr>
// CHECK-NEXT: acc.yield
//
// CHECK-NEXT: } combiner {
@@ -478,24 +395,8 @@ void acc_loop() {
// CHECK-NEXT: acc.reduction.recipe @reduction_ior__ZTSA5_i : !cir.ptr> reduction_operator init {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init]
-// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["arrayinit.temp"]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr
-// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr>
-// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
-// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride %[[DECAY]], %[[LAST_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr
-// CHECK-NEXT: cir.do {
-// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr
-// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i
-// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[TEMP_LOAD]] : !s32i, !cir.ptr
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i
-// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride %[[TEMP_LOAD]], %[[ONE]] : (!cir.ptr, !s64i) -> !cir.ptr
-// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr>
-// CHECK-NEXT: cir.yield
-// CHECK-NEXT: } while {
-// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr
-// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr, !cir.bool
-// CHECK-NEXT: cir.condition(%[[CMP]])
-// CHECK-NEXT: }
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.zero : !cir.array
+// CHECK-NEXT: cir.store{{.*}} %[[ZERO]], %[[ALLOCA]] : !cir.array, !cir.ptr>
// CHECK-NEXT: acc.yield
//
// CHECK-NEXT: } combiner {
@@ -532,24 +433,8 @@ void acc_loop() {
// CHECK-NEXT: acc.reduction.recipe @reduction_xor__ZTSA5_i : !cir.ptr> reduction_operator init {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init]
-// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["arrayinit.temp"]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr
-// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr>
-// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
-// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride %[[DECAY]], %[[LAST_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr
-// CHECK-NEXT: cir.do {
-// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr
-// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i
-// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[TEMP_LOAD]] : !s32i, !cir.ptr
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i
-// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride %[[TEMP_LOAD]], %[[ONE]] : (!cir.ptr, !s64i) -> !cir.ptr
-// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr>
-// CHECK-NEXT: cir.yield
-// CHECK-NEXT: } while {
-// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr
-// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr, !cir.bool
-// CHECK-NEXT: cir.condition(%[[CMP]])
-// CHECK-NEXT: }
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.zero : !cir.array
+// CHECK-NEXT: cir.store{{.*}} %[[ZERO]], %[[ALLOCA]] : !cir.array, !cir.ptr>
// CHECK-NEXT: acc.yield
//
// CHECK-NEXT: } combiner {
@@ -586,25 +471,8 @@ void acc_loop() {
// CHECK-NEXT: acc.reduction.recipe @reduction_land__ZTSA5_i : !cir.ptr> reduction_operator init {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
-// CHECK-NEXT: cir.store{{.*}} %[[ONE]], %[[DECAY]] : !s32i, !cir.ptr
-// CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i
-// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[ONE_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
-// CHECK-NEXT: cir.store{{.*}} %[[ONE]], %[[NEXT_ELT]] : !s32i, !cir.ptr
-// CHECK-NEXT: %[[TWO_IDX:.*]] = cir.const #cir.int<2> : !s64i
-// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[TWO_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
-// CHECK-NEXT: cir.store{{.*}} %[[ONE]], %[[NEXT_ELT]] : !s32i, !cir.ptr
-// CHECK-NEXT: %[[THREE_IDX:.*]] = cir.const #cir.int<3> : !s64i
-// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[THREE_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
-// CHECK-NEXT: cir.store{{.*}} %[[ONE]], %[[NEXT_ELT]] : !s32i, !cir.ptr
-// CHECK-NEXT: %[[FOUR_IDX:.*]] = cir.const #cir.int<4> : !s64i
-// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride %[[DECAY]], %[[FOUR_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
-// CHECK-NEXT: cir.store{{.*}} %[[ONE]], %[[NEXT_ELT]] : !s32i, !cir.ptr
+// CHECK-NEXT: %[[CONST_ARRAY:.*]] = cir.const #cir.const_array<[#cir.int<1> : !s32i, #cir.int<1> : !s32i, #cir.int<1> : !s32i, #cir.int<1> : !s32i, #cir.int<1> : !s32i]> : !cir.array
+// CHECK-NEXT: cir.store{{.*}} %[[CONST_ARRAY]], %[[ALLOCA]] : !cir.array, !cir.ptr>
// CHECK-NEXT: acc.yield
//
// CHECK-NEXT: } combiner {
@@ -651,24 +519,8 @@ void acc_loop() {
// CHECK-NEXT: acc.reduction.recipe @reduction_lor__ZTSA5_i : !cir.ptr> reduction_operator init {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init]
-// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["arrayinit.temp"]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr
-// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr>
-// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
-// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride %[[DECAY]], %[[LAST_IDX]] : (!cir.ptr, !s64i) -> !cir.ptr
-// CHECK-NEXT: cir.do {
-// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr
-// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i
-// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[TEMP_LOAD]] : !s32i, !cir.ptr
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i
-// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride %[[TEMP_LOAD]], %[[ONE]] : (!cir.ptr, !s64i) -> !cir.ptr
-// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr>
-// CHECK-NEXT: cir.yield
-// CHECK-NEXT: } while {
-// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr
-// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr, !cir.bool
-// CHECK-NEXT: cir.condition(%[[CMP]])
-// CHECK-NEXT: }
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.zero : !cir.array
+// CHECK-NEXT: cir.store{{.*}} %[[ZERO]], %[[ALLOCA]] : !cir.array, !cir.ptr>
// CHECK-NEXT: acc.yield
//
// CHECK-NEXT: } combiner {
diff --git a/clang/test/CIR/Lowering/array.cpp b/clang/test/CIR/Lowering/array.cpp
index 40ad986b7fdfa..de4a77072b930 100644
--- a/clang/test/CIR/Lowering/array.cpp
+++ b/clang/test/CIR/Lowering/array.cpp
@@ -60,24 +60,7 @@ void func2() {

// CHECK: define{{.*}} void @_Z5func2v()
// CHECK: %[[ARR:.*]] = alloca [2 x i32], i64 1, align 4
-// CHECK: %[[TMP:.*]] = alloca ptr, i64 1, align 8
-// CHECK: %[[ARR_PTR:.*]] = getelementptr i32, ptr %[[ARR]], i32 0
-// CHECK: store i32 5, ptr %[[ARR_PTR]], align 4
-// CHECK: %[[ELE_1_PTR:.*]] = getelementptr i32, ptr %[[ARR_PTR]], i64 1
-// CHECK: store ptr %[[ELE_1_PTR]], ptr %[[TMP]], align 8
-// CHECK: %[[END_PTR:.*]] = getelementptr i32, ptr %[[ARR_PTR]], i64 2
-// CHECK: br label %[[LOOP_BODY:.*]]
-// CHECK: [[LOOP_NEXT:.*]]:
-// CHECK: %[[CUR:.*]] = load ptr, ptr %[[TMP]], align 8
-// CHECK: %[[CMP:.*]] = icmp ne ptr %[[CUR]], %[[END_PTR]]
-//
-// CHECK: [[LOOP_BODY]]:
-// CHECK: %[[CUR:.*]] = load ptr, ptr %[[TMP]], align 8
-// CHECK: store i32 0, ptr %[[CUR]], align 4
-// CHECK: %[[NEXT:.*]] = getelementptr i32, ptr %[[CUR]], i64 1
-// CHECK: store ptr %[[NEXT]], ptr %[[TMP]], align 8
-// CHECK: br label %[[LOOP_NEXT:.*]]
-// CHECK: [[LOOP_END]]:
+// CHECK: store [2 x i32] [i32 5, i32 0], ptr %[[ARR]], align 4
 // CHECK: ret void
 
 void func3() {
@@ -85,10 +68,7 @@ void func3() {
 }
 // CHECK: define{{.*}} void @_Z5func3v()
 // CHECK: %[[ARR_ALLOCA:.*]] = alloca [2 x i32], i64 1, align 4
-// CHECK: %[[ARR_PTR:.*]] = getelementptr i32, ptr %[[ARR_ALLOCA]], i32 0
-// CHECK: store i32 5, ptr %[[ARR_PTR]], align 4
-// CHECK: %[[ELE_1_PTR:.*]] = getelementptr i32, ptr %[[ARR_PTR]], i64 1
-// CHECK: store i32 6, ptr %[[ELE_1_PTR]], align 4
+// CHECK: store [2 x i32] [i32 5, i32 6], ptr %[[ARR_ALLOCA]], align 4
 
 void func4() {
   int arr[2][1] = {{5}, {6}};
@@ -97,12 +77,7 @@ void func4() {
 // CHECK: define{{.*}} void @_Z5func4v()
 // CHECK: %[[ARR_ALLOCA:.*]] = alloca [2 x [1 x i32]], i64 1, align 4
 // CHECK: %[[INIT:.*]] = alloca i32, i64 1, align 4
-// CHECK: %[[ARR_PTR:.*]] = getelementptr [1 x i32], ptr %[[ARR_ALLOCA]], i32 0
-// CHECK: %[[ARR_0_0:.*]] = getelementptr i32, ptr %[[ARR_PTR]], i32 0
-// CHECK: store i32 5, ptr %[[ARR_0_0]], align 4
-// CHECK: %[[ARR_1:.*]] = getelementptr [1 x i32], ptr %[[ARR_PTR]], i64 1
-// CHECK: %[[ARR_1_0:.*]] = getelementptr i32, ptr %[[ARR_1]], i32 0
-// CHECK: store i32 6, ptr %[[ARR_1_0]], align 4
+// CHECK: store [2 x [1 x i32]] {{\[}}[1 x i32] [i32 5], [1 x i32] [i32 6]], ptr %[[ARR_ALLOCA]], align 4
 // CHECK: %[[ARR_PTR:.*]] = getelementptr [1 x i32], ptr %[[ARR_ALLOCA]], i32 0
 // CHECK: %[[ARR_1:.*]] = getelementptr [1 x i32], ptr %[[ARR_PTR]], i64 1
 // CHECK: %[[ARR_1_0:.*]] = getelementptr i32, ptr %[[ARR_1]], i32 0
@@ -115,25 +90,7 @@ void func5() {
 }
 // CHECK: define{{.*}} void @_Z5func5v()
 // CHECK: %[[ARR:.*]] = alloca [2 x [1 x i32]], i64 1, align 4
-// CHECK: %[[TMP:.*]] = alloca ptr, i64 1, align 8
-// CHECK: %[[ARR_PTR:.*]] = getelementptr [1 x i32], ptr %[[ARR]], i32 0
-// CHECK: %[[ARR_0:.*]] = getelementptr i32, ptr %[[ARR_PTR]], i32 0
-// CHECK: store i32 5, ptr %[[ARR_0]], align 4
-// CHECK: %[[ARR_1:.*]] = getelementptr [1 x i32], ptr %[[ARR_PTR]], i64 1
-// CHECK: store ptr %[[ARR_1]], ptr %[[TMP]], align 8
-// CHECK: %[[END_PTR:.*]] = getelementptr [1 x i32], ptr %[[ARR_PTR]], i64 2
-// CHECK: br label %[[LOOP_BODY:.*]]
-// CHECK: [[LOOP_NEXT:.*]]:
-// CHECK: %[[CUR:.*]] = load ptr, ptr %[[TMP]], align 8
-// CHECK: %[[CMP:.*]] = icmp ne ptr %[[CUR]], %[[END_PTR]]
-// CHECK: br i1 %[[CMP]], label %[[LOOP_BODY]], label %[[LOOP_END:.*]]
-// CHECK: [[LOOP_BODY]]:
-// CHECK: %[[CUR:.*]] = load ptr, ptr %[[TMP]], align 8
-// CHECK: store [1 x i32] zeroinitializer, ptr %[[CUR]], align 4
-// CHECK: %[[NEXT:.*]] = getelementptr [1 x i32], ptr %[[CUR]], i64 1
-// CHECK: store ptr %[[NEXT]], ptr %[[TMP]], align 8
-// CHECK: br label %[[LOOP_NEXT:.*]]
-// CHECK: [[LOOP_END]]:
+// CHECK: store [2 x [1 x i32]] {{\[}}[1 x i32] [i32 5], [1 x i32] zeroinitializer], ptr %[[ARR]], align 4
 // CHECK: ret void
 
 void func6() {
@@ -155,22 +112,7 @@ void func7() {
 }
 // CHECK: define{{.*}} void @_Z5func7v()
 // CHECK: %[[ARR:.*]] = alloca [1 x ptr], i64 1, align 8
-// CHECK: %[[TMP:.*]] = alloca ptr, i64 1, align 8
-// CHECK: %[[ARR_PTR:.*]] = getelementptr ptr, ptr %[[ARR]], i32 0
-// CHECK: store ptr %[[ARR_PTR]], ptr %[[TMP]], align 8
-// CHECK: %[[END_PTR:.*]] = getelementptr ptr, ptr %[[ARR_PTR]], i64 1
-// CHECK: br label %[[LOOP_BODY:.*]]
-// CHECK: [[LOOP_NEXT:.*]]:
-// CHECK: %[[CUR:.*]] = load ptr, ptr %[[TMP]], align 8
-// CHECK: %[[CMP:.*]] = icmp ne ptr %[[CUR]], %[[END_PTR]]
-// CHECK: br i1 %[[CMP]], label %[[LOOP_BODY]], label %[[LOOP_END:.*]]
-// CHECK: [[LOOP_BODY]]:
-// CHECK: %[[CUR:.*]] = load ptr, ptr %[[TMP]], align 8
-// CHECK: store ptr null, ptr %[[CUR]], align 8
-// CHECK: %[[NEXT:.*]] = getelementptr ptr, ptr %[[CUR]], i64 1
-// CHECK: store ptr %[[NEXT]], ptr %[[TMP]], align 8
-// CHECK: br label %[[LOOP_NEXT:.*]]
-// CHECK: [[LOOP_END]]:
+// CHECK: store [1 x ptr] zeroinitializer, ptr %[[ARR]], align 8
 // CHECK: ret void
 
 void func8(int p[10]) {}
diff --git a/clang/test/CodeGen/WebAssembly/musttail.c b/clang/test/CodeGen/WebAssembly/musttail.c
new file mode 100644
index 0000000000000..37fed70028bbc
--- /dev/null
+++ b/clang/test/CodeGen/WebAssembly/musttail.c
@@ -0,0 +1,20 @@
+// RUN: %clang_cc1 %s -triple wasm32-unknown-unknown -target-feature +tail-call -o /dev/null -emit-llvm -verify=tail
+// RUN: %clang_cc1 %s -triple wasm32-unknown-unknown -o /dev/null -emit-llvm -verify=notail
+
+int foo(int x) {
+  return x;
+}
+
+#if __has_attribute(musttail)
+// tail-warning@+1 {{HAS IT}}
+#warning HAS IT
+#else
+// notail-warning@+1 {{DOES NOT HAVE}}
+#warning DOES NOT HAVE
+#endif
+
+int bar(int x)
+{
+  // notail-warning@+1 {{unknown attribute 'clang::musttail' ignored}}
+  [[clang::musttail]] return foo(1);
+}
diff --git a/clang/test/CodeGen/X86/avx512bw-builtins.c b/clang/test/CodeGen/X86/avx512bw-builtins.c
index 834e140018c34..0b73c7b14d869 100644
--- a/clang/test/CodeGen/X86/avx512bw-builtins.c
+++ b/clang/test/CodeGen/X86/avx512bw-builtins.c
@@ -1205,12 +1205,16 @@ __m512i test_mm512_maskz_packs_epi32(__mmask32 __M, __m512i __A, __m512i __B) {
   // CHECK: select <32 x i1> %{{.*}}, <32 x i16> %{{.*}}, <32 x i16> %{{.*}}
   return _mm512_maskz_packs_epi32(__M,__A,__B);
 }
+TEST_CONSTEXPR(match_v32hi(_mm512_maskz_packs_epi32((__mmask32)0xAAAAAAAA,(__m512i)(__v16si){40000,-50000,32767,-32768,70000,-70000,42,-42,0,1,-1,30000,32768,-32769,65535,-65536},(__m512i)(__v16si){0,1,-1,65536,-1000000,1000000,32768,-32769,123456,-123456,32767,-32768,22222,-22222,40000,-40000}),0,-32768,0,-32768,0,1,0,32767,0,-32768,0,-42,0,32767,0,-32768,0,1,0,30000,0,-32768,0,-32768,0,-32768,0,-32768,0,-22222,0,-32768));
+
 __m512i test_mm512_mask_packs_epi32(__m512i __W, __mmask32 __M, __m512i __A, __m512i __B) {
   // CHECK-LABEL: test_mm512_mask_packs_epi32
   // CHECK: @llvm.x86.avx512.packssdw.512
   // CHECK: select <32 x i1> %{{.*}}, <32 x i16> %{{.*}}, <32 x i16> %{{.*}}
   return _mm512_mask_packs_epi32(__W,__M,__A,__B);
 }
+TEST_CONSTEXPR(match_v32hi(_mm512_mask_packs_epi32((__m512i)(__v32hi){1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32},(__mmask32)0xAAAAAAAA,(__m512i)(__v16si){40000,-50000,32767,-32768,70000,-70000,42,-42,0,1,-1,30000,32768,-32769,65535,-65536},(__m512i)(__v16si){0,1,-1,65536,-1000000,1000000,32768,-32769,123456,-123456,32767,-32768,22222,-22222,40000,-40000}),1,-32768,3,-32768,5,1,7,32767,9,-32768,11,-42,13,32767,15,-32768,17,1,19,30000,21,-32768,23,-32768,25,-32768,27,-32768,29,-22222,31,-32768));
+
 __m512i test_mm512_packs_epi16(__m512i __A, __m512i __B) {
   // CHECK-LABEL: test_mm512_packs_epi16
   // CHECK: @llvm.x86.avx512.packsswb.512
@@ -1223,48 +1227,62 @@ __m512i test_mm512_mask_packs_epi16(__m512i __W, __mmask64 __M, __m512i __A, __m
   // CHECK: select <64 x i1> %{{.*}}, <64 x i8> %{{.*}}, <64 x i8> %{{.*}}
   return _mm512_mask_packs_epi16(__W,__M,__A,__B);
 }
+TEST_CONSTEXPR(match_v64qi(_mm512_mask_packs_epi16((__m512i)(__v64qs){1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64},(__mmask64)0xAAAAAAAAAAAAAAAA,(__m512i)(__v32hi){130,-200,127,-128,300,-1000,42,-42,32767,-32767,127,-128,30000,-30000,90,-90,130,-200,0,-1,126,-127,128,-129,500,-500,7,-7,255,-255,127,-128},(__m512i)(__v32hi){0,1,-1,255,-129,128,20000,-32768,5,-5,100,-100,127,-128,512,-512,1,2,-2,300,-300,127,-128,42,0,1,-1,127,-128,90,-90,-32768}),1,-128,3,-128,5,-128,7,-42,9,1,11,127,13,127,15,-128,17,-128,19,-128,21,-128,23,-90,25,-5,27,-100,29,-128,31,-128,33,-128,35,-1,37,-127,39,-128,41,2,43,127,45,127,47,42,49,-128,51,-7,53,-128,55,-128,57,1,59,127,61,90,63,-128));
+
 __m512i test_mm512_maskz_packs_epi16(__mmask64 __M, __m512i __A, __m512i __B) {
   // CHECK-LABEL: test_mm512_maskz_packs_epi16
   // CHECK: @llvm.x86.avx512.packsswb.512
   // CHECK: select <64 x i1> %{{.*}}, <64 x i8> %{{.*}}, <64 x i8> %{{.*}}
   return _mm512_maskz_packs_epi16(__M,__A,__B);
 }
+TEST_CONSTEXPR(match_v64qi(_mm512_maskz_packs_epi16((__mmask64)0xAAAAAAAAAAAAAAAA,(__m512i)(__v32hi){130,-200,127,-128,300,-1000,42,-42,32767,-32767,127,-128,30000,-30000,90,-90,130,-200,0,-1,126,-127,128,-129,500,-500,7,-7,255,-255,127,-128},(__m512i)(__v32hi){0,1,-1,255,-129,128,20000,-32768,5,-5,100,-100,127,-128,512,-512,1,2,-2,300,-300,127,-128,42,0,1,-1,127,-128,90,-90,-32768}),0,-128,0,-128,0,-128,0,-42,0,1,0,127,0,127,0,-128,0,-128,0,-128,0,-128,0,-90,0,-5,0,-100,0,-128,0,-128,0,-128,0,-1,0,-127,0,-128,0,2,0,127,0,127,0,42,0,-128,0,-7,0,-128,0,-128,0,1,0,127,0,90,0,-128));
+
 __m512i test_mm512_packus_epi32(__m512i __A, __m512i __B) {
   // CHECK-LABEL: test_mm512_packus_epi32
   // CHECK: @llvm.x86.avx512.packusdw.512
   return _mm512_packus_epi32(__A,__B);
 }
 TEST_CONSTEXPR(match_v32hi(_mm512_packus_epi32((__m512i)(__v16si){40000, -50000, 32767, -32768, 70000, -70000, 42, -42, 0, 1, -1, 65535, 32768, -32769, 22222, -22222}, (__m512i)(__v16si){0, 1, -1, 65536, -1000000, 1000000, 32768, -32769, 123456, -123456, 32767, -32768, 40000, -40000, 65535, 0}), -25536, 0, 32767, 0, 0, 1, 0, -1, -1, 0, 42, 0, 0, -1, -32768, 0, 0, 1, 0, -1, -1, 0, 32767, 0, -32768, 0, 22222, 0, -25536, 0, -1, 0));
+
 __m512i test_mm512_maskz_packus_epi32(__mmask32 __M, __m512i __A, __m512i __B) {
   // CHECK-LABEL: test_mm512_maskz_packus_epi32
   // CHECK: @llvm.x86.avx512.packusdw.512
   // CHECK: select <32 x i1> %{{.*}}, <32 x i16> %{{.*}}, <32 x i16> %{{.*}}
   return _mm512_maskz_packus_epi32(__M,__A,__B);
 }
+TEST_CONSTEXPR(match_v32hi(_mm512_maskz_packus_epi32((__mmask32)0xAAAAAAAA,(__m512i)(__v16si){40000,-50000,32767,-32768,70000,-70000,42,-42,0,1,-1,65535,32768,-32769,22222,-22222},(__m512i)(__v16si){0,1,-1,65536,-1000000,1000000,32768,-32769,123456,-123456,32767,-32768,40000,-40000,65535,0}),0,0,0,0,0,1,0,-1,0,0,0,0,0,-1,0,0,0,1,0,-1,0,0,0,0,0,0,0,0,0,0,0,0));
+
 __m512i test_mm512_mask_packus_epi32(__m512i __W, __mmask32 __M, __m512i __A, __m512i __B) {
   // CHECK-LABEL: test_mm512_mask_packus_epi32
   // CHECK: @llvm.x86.avx512.packusdw.512
   // CHECK: select <32 x i1> %{{.*}}, <32 x i16> %{{.*}}, <32 x i16> %{{.*}}
   return _mm512_mask_packus_epi32(__W,__M,__A,__B);
 }
+TEST_CONSTEXPR(match_v32hi(_mm512_mask_packus_epi32((__m512i)(__v32hi){1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32},(__mmask32)0xAAAAAAAA,(__m512i)(__v16si){40000,-50000,32767,-32768,70000,-70000,42,-42,0,1,-1,65535,32768,-32769,22222,-22222},(__m512i)(__v16si){0,1,-1,65536,-1000000,1000000,32768,-32769,123456,-123456,32767,-32768,40000,-40000,65535,0}),1,0,3,0,5,1,7,-1,9,0,11,0,13,-1,15,0,17,1,19,-1,21,0,23,0,25,0,27,0,29,0,31,0));
+
 __m512i test_mm512_packus_epi16(__m512i __A, __m512i __B) {
   // CHECK-LABEL: test_mm512_packus_epi16
   // CHECK: @llvm.x86.avx512.packuswb.512
   return _mm512_packus_epi16(__A,__B);
 }
 TEST_CONSTEXPR(match_v64qi(_mm512_packus_epi16((__m512i)(__v32hi){-1, 0, 1, 127, 128, 255, 256, -200, 300, 42, -42, 500, 20000, -32768, 129, -129, -1, 0, 1, 127, 128, 255, 256, -200, 300, 42, -42, 500, 20000, -32768, 129, -129}, (__m512i)(__v32hi){0, 1, -1, 255, -129, 128, 20000, -32768, 32767, -32767, 127, -128, 30000, -30000, 90, -90, 0, 1, -1, 255, -129, 128, 20000, -32768, 32767, -32767, 127, -128, 30000, -30000, 90, -90}), 0, 0, 1, 127, -128, -1, -1, 0, 0, 1, 0, -1, 0, -128, -1, 0, -1, 42, 0, -1, -1, 0, -127, 0, -1, 0, 127, 0, -1, 0, 90, 0, 0, 0, 1, 127, -128, -1, -1, 0, 0, 1, 0, -1, 0, -128, -1, 0, -1, 42, 0, -1, -1, 0, -127, 0, -1, 0, 127, 0, -1, 0, 90, 0));
+
 __m512i test_mm512_mask_packus_epi16(__m512i __W, __mmask64 __M, __m512i __A, __m512i __B) {
   // CHECK-LABEL: test_mm512_mask_packus_epi16
   // CHECK: @llvm.x86.avx512.packuswb.512
   // CHECK: select <64 x i1> %{{.*}}, <64 x i8> %{{.*}}, <64 x i8> %{{.*}}
   return _mm512_mask_packus_epi16(__W,__M,__A,__B);
 }
+TEST_CONSTEXPR(match_v64qi(_mm512_mask_packus_epi16((__m512i)(__v64qu){1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64},(__mmask64)0xAAAAAAAAAAAAAAAA,(__m512i)(__v32hi){-1,0,1,127,128,255,256,-200,300,42,-42,500,20000,-32768,129,-129,-1,0,1,127,128,255,256,-200,300,42,-42,500,20000,-32768,129,-129},(__m512i)(__v32hi){0,1,-1,255,-129,128,20000,-32768,32767,-32767,127,-128,30000,-30000,90,-90,0,1,-1,255,-129,128,20000,-32768,32767,-32767,127,-128,30000,-30000,90,-90}),1,0,3,127,5,-1,7,0,9,1,11,-1,13,-128,15,0,17,42,19,-1,21,0,23,0,25,0,27,0,29,0,31,0,33,0,35,127,37,-1,39,0,41,1,43,-1,45,-128,47,0,49,42,51,-1,53,0,55,0,57,0,59,0,61,0,63,0));
+
 __m512i test_mm512_maskz_packus_epi16(__mmask64 __M, __m512i __A, __m512i __B) {
   // CHECK-LABEL: test_mm512_maskz_packus_epi16
   // CHECK: @llvm.x86.avx512.packuswb.512
   // CHECK: select <64 x i1> %{{.*}}, <64 x i8> %{{.*}}, <64 x i8> %{{.*}}
   return _mm512_maskz_packus_epi16(__M,__A,__B);
 }
+TEST_CONSTEXPR(match_v64qi(_mm512_maskz_packus_epi16((__mmask64)0xAAAAAAAAAAAAAAAA,(__m512i)(__v32hi){-1,0,1,127,128,255,256,-200,300,42,-42,500,20000,-32768,129,-129,-1,0,1,127,128,255,256,-200,300,42,-42,500,20000,-32768,129,-129},(__m512i)(__v32hi){0,1,-1,255,-129,128,20000,-32768,32767,-32767,127,-128,30000,-30000,90,-90,0,1,-1,255,-129,128,20000,-32768,32767,-32767,127,-128,30000,-30000,90,-90}),0,0,0,127,0,-1,0,0,0,1,0,-1,0,-128,0,0,0,42,0,-1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,127,0,-1,0,0,0,1,0,-1,0,-128,0,0,0,42,0,-1,0,0,0,0,0,0,0,0,0,0,0,0));
+
 __m512i test_mm512_adds_epi8(__m512i __A, __m512i __B) {
   // CHECK-LABEL: test_mm512_adds_epi8
   // CHECK: @llvm.sadd.sat.v64i8
@@ -1278,18 +1296,22 @@ __m512i test_mm512_mask_adds_epi8(__m512i __W, __mmask64 __U, __m512i __A, __m51
   // CHECK: select <64 x i1> %{{.*}}, <64 x i8> %{{.*}}, <64 x i8> %{{.*}}
   return _mm512_mask_adds_epi8(__W,__U,__A,__B);
 }
+TEST_CONSTEXPR(match_v64qi(_mm512_mask_adds_epi8((__m512i)(__v64qs){1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64},(__mmask64)0xAAAAAAAAAAAAAAAA,(__m512i)(__v64qs){0,+1,-2,+3,-4,+5,-6,+7,-8,+9,-10,+11,-12,+13,-14,+15,-16,+17,-18,+19,-20,+21,-22,+23,-24,+25,-26,+27,-28,+29,-30,+31,-32,+33,-34,+35,-36,+37,-38,+39,-40,+41,-42,+43,-44,+45,-46,+47,+100,+50,-100,+20,+80,-50,+120,-20,-100,-50,+100,-20,-80,+50,-120,+20},(__m512i)(__v64qs){0,+1,-2,+3,-4,+5,-6,+7,-8,+9,-10,+11,-12,+13,-14,+15,-16,+17,-18,+19,-20,+21,-22,+23,-24,+25,-26,+27,-28,+29,-30,+31,-32,+33,-34,+35,-36,+37,-38,+39,-40,+41,-42,+43,-44,+45,-46,+47,+50,+80,-50,+110,+60,-30,+20,-10,+50,+80,-50,+110,+60,-30,+20,-10}),1,+2,3,+6,5,+10,7,+14,9,+18,11,+22,13,+26,15,+30,17,+34,19,+38,21,+42,23,+46,25,+50,27,+54,29,+58,31,+62,33,+66,35,+70,37,+74,39,+78,41,+82,43,+86,45,+90,47,+94,49,+127,51,+127,53,-80,+55,-30,57,+30,59,+90,61,+20,63,+10));
+
 __m512i test_mm512_maskz_adds_epi8(__mmask64 __U, __m512i __A, __m512i __B) {
   // CHECK-LABEL: test_mm512_maskz_adds_epi8
   // CHECK: @llvm.sadd.sat.v64i8
   // CHECK: select <64 x i1> %{{.*}}, <64 x i8> %{{.*}}, <64 x i8> %{{.*}}
   return _mm512_maskz_adds_epi8(__U,__A,__B);
 }
+TEST_CONSTEXPR(match_v64qi(_mm512_maskz_adds_epi8((__mmask64)0xAAAAAAAAAAAAAAAA,(__m512i)(__v64qs){0,+1,-2,+3,-4,+5,-6,+7,-8,+9,-10,+11,-12,+13,-14,+15,-16,+17,-18,+19,-20,+21,-22,+23,-24,+25,-26,+27,-28,+29,-30,+31,-32,+33,-34,+35,-36,+37,-38,+39,-40,+41,-42,+43,-44,+45,-46,+47,+100,+50,-100,+20,+80,-50,+120,-20,-100,-50,+100,-20,-80,+50,-120,+20},(__m512i)(__v64qs){0,+1,-2,+3,-4,+5,-6,+7,-8,+9,-10,+11,-12,+13,-14,+15,-16,+17,-18,+19,-20,+21,-22,+23,-24,+25,-26,+27,-28,+29,-30,+31,-32,+33,-34,+35,-36,+37,-38,+39,-40,+41,-42,+43,-44,+45,-46,+47,+50,+80,-50,+110,+60,-30,+20,-10,+50,+80,-50,+110,+60,-30,+20,-10}),0,+2,0,+6,0,+10,0,+14,0,+18,0,+22,0,+26,0,+30,0,+34,0,+38,0,+42,0,+46,0,+50,0,+54,0,+58,0,+62,0,+66,0,+70,0,+74,0,+78,0,+82,0,+86,0,+90,0,+94,0,+127,0,+127,0,-80,0,-30,0,+30,0,+90,0,+20,0,+10));
+
 __m512i test_mm512_adds_epi16(__m512i __A, __m512i __B) {
   // CHECK-LABEL: test_mm512_adds_epi16
   // CHECK: @llvm.sadd.sat.v32i16
   return _mm512_adds_epi16(__A,__B);
 }
-TEST_CONSTEXPR(match_v32hi(_mm512_adds_epi16((__m512i)(__v32hi){0, +1, -2, +3, -4, +5, -6, +7, -8, +9, -10, +11, -12, +13, -14, +15, -16, +17, -18, +19, -20, +21, -22, +23, -24, +25, -26, +27, +32000, -32000, +32000, -32000}, (__m512i)(__v32hi){0, +1, -2, +3, -4, +5, -6, +7, -8, +9, -10, +11, -12, +13, -14, +15, -16, +17, -18, +19, -20, +21, -22, +23, -24, +25, -26, +27, +800, -800, -800, +800}), 0, +2, -4, +6, -8, +10, -12, +14, -16, +18, -20, +22, -24, +26, -28, +30, -32, +34, -36, +38, -40, +42, -44, +46, -48, +50, -52, +54, +32767, -32768, +31200, -31200));
+TEST_CONSTEXPR(match_v32hi(_mm512_adds_epi16((__m512i)(__v32hi){0,+1,-2,+3,-4,+5,-6,+7,-8,+9,-10,+11,-12,+13,-14,+15,-16,+17,-18,+19,-20,+21,-22,+23,-24,+25,-26,+27,+32000,-32000,+32000,-32000},(__m512i)(__v32hi){0,+1,-2,+3,-4,+5,-6,+7,-8,+9,-10,+11,-12,+13,-14,+15,-16,+17,-18,+19,-20,+21,-22,+23,-24,+25,-26,+27,+800,-800,-800,+800}),0,+2,-4,+6,-8,+10,-12,+14,-16,+18,-20,+22,-24,+26,-28,+30,-32,+34,-36,+38,-40,+42,-44,+46,-48,+50,-52,+54,+32767,-32768,+31200,-31200));
 
 __m512i test_mm512_mask_adds_epi16(__m512i __W, __mmask32 __U, __m512i __A, __m512i __B) {
   // CHECK-LABEL: test_mm512_mask_adds_epi16
@@ -1297,12 +1319,16 @@ __m512i test_mm512_mask_adds_epi16(__m512i __W, __mmask32 __U, __m512i __A, __m5
   // CHECK: select <32 x i1> %{{.*}}, <32 x i16> %{{.*}}, <32 x i16> %{{.*}}
   return _mm512_mask_adds_epi16(__W,__U,__A,__B);
 }
+TEST_CONSTEXPR(match_v32hi(_mm512_mask_adds_epi16((__m512i)(__v32hi){1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32},(__mmask32)0xAAAAAAAAu,(__m512i)(__v32hi){0,+1,-2,+3,-4,+5,-6,+7,-8,+9,-10,+11,-12,+13,-14,+15,-16,+17,-18,+19,-20,+21,-22,+23,-24,+25,-26,+27,+32000,-32000,+32000,+32000},(__m512i)(__v32hi){0,+1,-2,+3,-4,+5,-6,+7,-8,+9,-10,+11,-12,+13,-14,+15,-16,+17,-18,+19,-20,+21,-22,+23,-24,+25,-26,+27,+800,-800,-800,+800}),1,+2,3,+6,5,+10,7,+14,9,+18,11,+22,13,+26,15,+30,17,+34,19,+38,21,+42,23,+46,25,+50,27,+54,29,-32768,31,+32767));
+
 __m512i test_mm512_maskz_adds_epi16(__mmask32 __U, __m512i __A, __m512i __B) {
   // CHECK-LABEL: test_mm512_maskz_adds_epi16
   // CHECK: @llvm.sadd.sat.v32i16
   // CHECK: select <32 x i1> %{{.*}}, <32 x i16> %{{.*}}, <32 x i16> %{{.*}}
   return _mm512_maskz_adds_epi16(__U,__A,__B);
 }
+TEST_CONSTEXPR(match_v32hi(_mm512_maskz_adds_epi16((__mmask32)0xAAAAAAAAu,(__m512i)(__v32hi){0,+1,-2,+3,-4,+5,-6,+7,-8,+9,-10,+11,-12,+13,-14,+15,-16,+17,-18,+19,-20,+21,-22,+23,-24,+25,-26,+27,+32000,-32000,+32000,+32000},(__m512i)(__v32hi){0,+1,-2,+3,-4,+5,-6,+7,-8,+9,-10,+11,-12,+13,-14,+15,-16,+17,-18,+19,-20,+21,-22,+23,-24,+25,-26,+27,+800,-800,-800,+800}),0,+2,0,+6,0,+10,0,+14,0,+18,0,+22,0,+26,0,+30,0,+34,0,+38,0,+42,0,+46,0,+50,0,+54,0,-32768,0,+32767));
+
 __m512i test_mm512_adds_epu8(__m512i __A, __m512i __B) {
   // CHECK-LABEL: test_mm512_adds_epu8
   // CHECK-NOT: @llvm.x86.avx512.mask.paddus.b.512
@@ -1318,7 +1344,7 @@ __m512i test_mm512_mask_adds_epu8(__m512i __W, __mmask64 __U, __m512i __A, __m51
   // CHECK: select <64 x i1> %{{.*}}, <64 x i8> %{{.*}}, <64 x i8> %{{.*}}
   return _mm512_mask_adds_epu8(__W,__U,__A,__B);
 }
-TEST_CONSTEXPR(match_v32hu(_mm512_adds_epu16((__m512i)(__v32hu){0, 0, 0, 0, +16384, +16384, +16384, +16384, +16384, +16384, +32767, +32767, +32767, +32767, +32767, +32767, +32768, +32768, +32768, +32768, +32768, +32768, +49152, +49152, +49152, +49152, +49152, +49152, +65535, +65535, +65535, +65535}, (__m512i)(__v32hu){0, +32767, +32768, +65535, 0, +16384, +32767, +32768, +49152, +65535, 0, +16384, +32767, +32768, +49152, +65535, 0, +16384, +32767, +32768, +49152, +65535, 0, +16384, +32767, +32768, +49152, +65535, 0, +32767, +32768, +65535}), 0, +32767, +32768, +65535, +16384, +32768, +49151, +49152, +65535, +65535, +32767, +49151, +65534, +65535, +65535, +65535, +32768, +49152, +65535, +65535, +65535, +65535, +49152, +65535, +65535, +65535, +65535, +65535, +65535, +65535, +65535, +65535));
+TEST_CONSTEXPR(match_v64qu(_mm512_mask_adds_epu8((__m512i)(__v64qu){1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64},(__mmask64)0xAAAAAAAAAAAAAAAA,(__m512i)(__v64qu){0,0,0,0,0,0,0,0,+63,+63,+63,+63,+63,+63,+63,+63,+64,+64,+64,+64,+64,+64,+64,+64,+127,+127,+127,+127,+127,+127,+127,+127,+128,+128,+128,+128,+128,+128,+128,+128,+191,+191,+191,+191,+191,+191,+191,+191,+192,+192,+192,+192,+192,+192,+192,+192,+255,+255,+255,+255,+255,+255,+255,+255},(__m512i)(__v64qu){0,+63,+64,+127,+128,+191,+192,+255,0,+63,+64,+127,+128,+191,+192,+255,0,+63,+64,+127,+128,+191,+192,+255,0,+63,+64,+127,+128,+191,+192,+255,0,+63,+64,+127,+128,+191,+192,+255,0,+63,+64,+127,+128,+191,+192,+255,0,+63,+64,+127,+128,+191,+192,+255,0,+63,+64,+127,+128,+191,+192,+255}),1,+63,3,+127,5,+191,7,+255,9,+126,11,+190,13,+254,15,+255,17,+127,19,+191,21,+255,23,+255,25,+190,27,+254,29,+255,31,+255,33,+191,35,+255,37,+255,39,+255,41,+254,43,+255,45,+255,47,+255,49,+255,51,+255,53,+255,55,+255,57,+255,59,+255,61,+255,63,+255));
 
 __m512i test_mm512_maskz_adds_epu8(__mmask64 __U, __m512i __A, __m512i __B) {
   // CHECK-LABEL: test_mm512_maskz_adds_epu8
@@ -1327,12 +1353,16 @@ __m512i test_mm512_maskz_adds_epu8(__mmask64 __U, __m512i __A, __m512i __B) {
   // CHECK: select <64 x i1> %{{.*}}, <64 x i8> %{{.*}}, <64 x i8> %{{.*}}
   return _mm512_maskz_adds_epu8(__U,__A,__B);
 }
+TEST_CONSTEXPR(match_v64qu(_mm512_maskz_adds_epu8((__mmask64)0xAAAAAAAAAAAAAAAA,(__m512i)(__v64qu){0,0,0,0,0,0,0,0,+63,+63,+63,+63,+63,+63,+63,+63,+64,+64,+64,+64,+64,+64,+64,+64,+127,+127,+127,+127,+127,+127,+127,+127,+128,+128,+128,+128,+128,+128,+128,+128,+191,+191,+191,+191,+191,+191,+191,+191,+192,+192,+192,+192,+192,+192,+192,+192,+255,+255,+255,+255,+255,+255,+255,+255},(__m512i)(__v64qu){0,+63,+64,+127,+128,+191,+192,+255,0,+63,+64,+127,+128,+191,+192,+255,0,+63,+64,+127,+128,+191,+192,+255,0,+63,+64,+127,+128,+191,+192,+255,0,+63,+64,+127,+128,+191,+192,+255,0,+63,+64,+127,+128,+191,+192,+255,0,+63,+64,+127,+128,+191,+192,+255,0,+63,+64,+127,+128,+191,+192,+255}),0,+63,0,+127,0,+191,0,+255,0,+126,0,+190,0,+254,0,+255,0,+127,0,+191,0,+255,0,+255,0,+190,0,+254,0,+255,0,+255,0,+191,0,+255,0,+255,0,+255,0,+254,0,+255,0,+255,0,+255,0,+255,0,+255,0,+255,0,+255,0,+255,0,+255,0,+255,0,+255));
+
 __m512i test_mm512_adds_epu16(__m512i __A, __m512i __B) {
   // CHECK-LABEL: test_mm512_adds_epu16
   // CHECK-NOT: @llvm.x86.avx512.mask.paddus.w.512
   // CHECK: call <32 x i16> @llvm.uadd.sat.v32i16(<32 x i16> %{{.*}}, <32 x i16> %{{.*}})
   return _mm512_adds_epu16(__A,__B);
 }
+TEST_CONSTEXPR(match_v32hu(_mm512_adds_epu16((__m512i)(__v32hu){0, 0, 0, 0, +16384, +16384, +16384, +16384, +16384, +16384, +32767, +32767, +32767, +32767, +32767, +32767, +32768, +32768, +32768, +32768, +32768, +32768, +49152, +49152, +49152, +49152, +49152, +49152, +65535, +65535, +65535, +65535}, (__m512i)(__v32hu){0, +32767, +32768, +65535, 0, +16384, +32767, +32768, +49152, +65535, 0, +16384, +32767, +32768, +49152, +65535, 0, +16384, +32767, +32768, +49152, +65535, 0, +16384, +32767, +32768, +49152, +65535, 0, +32767, +32768, +65535}), 0, +32767, +32768, +65535, +16384, +32768, +49151, +49152, +65535, +65535, +32767, +49151, +65534, +65535, +65535, +65535, +32768, +49152, +65535, +65535, +65535, +65535, +49152, +65535, +65535, +65535, +65535, +65535, +65535, +65535, +65535, +65535));
+
 __m512i test_mm512_mask_adds_epu16(__m512i __W, __mmask32 __U, __m512i __A, __m512i __B) {
   // CHECK-LABEL: test_mm512_mask_adds_epu16
   // CHECK-NOT: @llvm.x86.avx512.mask.paddus.w.512
@@ -1340,6 +1370,8 @@ __m512i test_mm512_mask_adds_epu16(__m512i __W, __mmask32 __U, __m512i __A, __m5
   // CHECK: select <32 x i1> %{{.*}}, <32 x i16> %{{.*}}, <32 x i16> %{{.*}}
   return _mm512_mask_adds_epu16(__W,__U,__A,__B);
 }
+TEST_CONSTEXPR(match_v32hu(_mm512_mask_adds_epu16((__m512i)(__v32hu){1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32},(__mmask32)0xAAAAAAAA,(__m512i)(__v32hu){0,0,0,0,+16384,+16384,+16384,+16384,+16384,+16384,+32767,+32767,+32767,+32767,+32767,+32767,+32768,+32768,+32768,+32768,+32768,+32768,+49152,+49152,+49152,+49152,+49152,+49152,+65535,+65535,+65535,+65535},(__m512i)(__v32hu){0,+32767,+32768,+65535,0,+16384,+32767,+32768,+49152,+65535,0,+16384,+32767,+32768,+49152,+65535,0,+16384,+32767,+32768,+49152,+65535,0,+16384,+32767,+32768,+49152,+65535,0,+32767,+32768,+65535}),1,+32767,3,+65535,5,+32768,7,+49152,9,+65535,11,+49151,13,+65535,15,+65535,17,+49152,19,+65535,21,+65535,23,+65535,25,+65535,27,+65535,29,+65535,31,+65535));
+
 __m512i test_mm512_maskz_adds_epu16(__mmask32 __U, __m512i __A, __m512i __B) {
   // CHECK-LABEL: test_mm512_maskz_adds_epu16
   // CHECK-NOT: @llvm.x86.avx512.mask.paddus.w.512
@@ -1347,6 +1379,8 @@ __m512i test_mm512_maskz_adds_epu16(__mmask32 __U, __m512i __A, __m512i __B) {
   // CHECK: select <32 x i1> %{{.*}}, <32 x i16> %{{.*}}, <32 x i16> %{{.*}}
   return _mm512_maskz_adds_epu16(__U,__A,__B);
 }
+TEST_CONSTEXPR(match_v32hu(_mm512_maskz_adds_epu16((__mmask32)0xAAAAAAAA,(__m512i)(__v32hu){0,0,0,0,+16384,+16384,+16384,+16384,+16384,+16384,+32767,+32767,+32767,+32767,+32767,+32767,+32768,+32768,+32768,+32768,+32768,+32768,+49152,+49152,+49152,+49152,+49152,+49152,+65535,+65535,+65535,+65535},(__m512i)(__v32hu){0,+32767,+32768,+65535,0,+16384,+32767,+32768,+49152,+65535,0,+16384,+32767,+32768,+49152,+65535,0,+16384,+32767,+32768,+49152,+65535,0,+16384,+32767,+32768,+49152,+65535,0,+32767,+32768,+65535}),0,+32767,0,+65535,0,+32768,0,+49152,0,+65535,0,+49151,0,+65535,0,+65535,0,+49152,0,+65535,0,+65535,0,+65535,0,+65535,0,+65535,0,+65535,0,+65535));
+
 __m512i test_mm512_avg_epu8(__m512i __A, __m512i __B) {
   // CHECK-LABEL: test_mm512_avg_epu8
   // CHECK: @llvm.x86.avx512.pavg.b.512
@@ -1640,12 +1674,16 @@ __m512i test_mm512_mask_subs_epi8(__m512i __W, __mmask64 __U, __m512i __A, __m51
   // CHECK: select <64 x i1> %{{.*}}, <64 x i8> %{{.*}}, <64 x i8> %{{.*}}
   return _mm512_mask_subs_epi8(__W,__U,__A,__B);
 }
+TEST_CONSTEXPR(match_v64qi(_mm512_mask_subs_epi8((__m512i)(__v64qs){1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64},(__mmask64)0xAAAAAAAAAAAAAAAA,(__m512i)(__v64qs){1,-100,3,4,5,-6,7,100,9,-100,11,12,13,-14,15,100,17,-100,19,20,21,-22,23,100,25,-100,27,28,29,-30,31,100,33,-100,35,36,37,-38,39,100,41,-100,43,44,45,-46,47,100,49,-100,51,52,53,-54,55,100,57,-100,59,60,61,-62,63,100},(__m512i)(__v64qs){1,100,3,4,5,6,7,-100,9,100,11,12,13,14,15,-100,17,100,19,20,21,22,23,-100,25,100,27,28,29,30,31,-100,33,100,35,36,37,38,39,-100,41,100,43,44,45,46,47,-100,49,100,51,52,53,54,55,-100,57,100,59,60,61,62,63,-100}),1,-128,3,0,5,-12,7,127,9,-128,11,0,13,-28,15,127,17,-128,19,0,21,-44,23,127,25,-128,27,0,29,-60,31,127,33,-128,35,0,37,-76,39,127,41,-128,43,0,45,-92,47,127,49,-128,51,0,53,-108,55,127,57,-128,59,0,61,-124,63,127));
+
 __m512i test_mm512_maskz_subs_epi8(__mmask64 __U, __m512i __A, __m512i __B) {
   // CHECK-LABEL: test_mm512_maskz_subs_epi8
   // CHECK: @llvm.ssub.sat.v64i8
   // CHECK: select <64 x i1> %{{.*}}, <64 x i8> %{{.*}}, <64 x i8> %{{.*}}
   return _mm512_maskz_subs_epi8(__U,__A,__B);
 }
+TEST_CONSTEXPR(match_v64qi(_mm512_maskz_subs_epi8((__mmask64)0xAAAAAAAAAAAAAAAA,(__m512i)(__v64qs){1,-100,3,4,5,-6,7,100,9,-100,11,12,13,-14,15,100,17,-100,19,20,21,-22,23,100,25,-100,27,28,29,-30,31,100,33,-100,35,36,37,-38,39,100,41,-100,43,44,45,-46,47,100,49,-100,51,52,53,-54,55,100,57,-100,59,60,61,-62,63,100},(__m512i)(__v64qs){1,100,3,4,5,6,7,-100,9,100,11,12,13,14,15,-100,17,100,19,20,21,22,23,-100,25,100,27,28,29,30,31,-100,33,100,35,36,37,38,39,-100,41,100,43,44,45,46,47,-100,49,100,51,52,53,54,55,-100,57,100,59,60,61,62,63,-100}),0,-128,0,0,0,-12,0,127,0,-128,0,0,0,-28,0,127,0,-128,0,0,0,-44,0,127,0,-128,0,0,0,-60,0,127,0,-128,0,0,0,-76,0,127,0,-128,0,0,0,-92,0,127,0,-128,0,0,0,-108,0,127,0,-128,0,0,0,-124,0,127));
+
 __m512i test_mm512_subs_epi16(__m512i __A, __m512i __B) {
   // CHECK-LABEL: test_mm512_subs_epi16
   // CHECK: @llvm.ssub.sat.v32i16
@@ -1658,18 +1696,24 @@ __m512i test_mm512_mask_subs_epi16(__m512i __W, __mmask32 __U, __m512i __A, __m5
   // CHECK: select <32 x i1> %{{.*}}, <32 x i16> %{{.*}}, <32 x i16> %{{.*}}
   return _mm512_mask_subs_epi16(__W,__U,__A,__B);
 }
+TEST_CONSTEXPR(match_v32hi(_mm512_mask_subs_epi16((__m512i)(__v32hi){1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32},(__mmask32)0xAAAAAAAA,(__m512i)(__v32hi){1,-30000,3,30000,5,-6,7,8,9,-30000,11,30000,13,-14,15,16,17,-30000,19,30000,21,-22,23,24,25,-30000,27,30000,29,-30,31,32},(__m512i)(__v32hi){1,30000,3,-30000,5,6,7,-8,9,30000,11,-30000,13,14,15,-16,17,30000,19,-30000,21,22,23,-24,25,30000,27,-30000,29,30,31,-32}),1,-32768,3,32767,5,-12,7,16,9,-32768,11,32767,13,-28,15,32,17,-32768,19,32767,21,-44,23,48,25,-32768,27,32767,29,-60,31,64));
+
 __m512i test_mm512_maskz_subs_epi16(__mmask32 __U, __m512i __A, __m512i __B) {
   // CHECK-LABEL: test_mm512_maskz_subs_epi16
   // CHECK: @llvm.ssub.sat.v32i16
   // CHECK: select <32 x i1> %{{.*}}, <32 x i16> %{{.*}}, <32 x i16> %{{.*}}
   return _mm512_maskz_subs_epi16(__U,__A,__B);
 }
+TEST_CONSTEXPR(match_v32hi(_mm512_maskz_subs_epi16((__mmask32)0xAAAAAAAAu,(__m512i)(__v32hi){1,-30000,3,30000,5,-6,7,8,9,-30000,11,30000,13,-14,15,16,17,-30000,19,30000,21,-22,23,24,25,-30000,27,30000,29,-30,31,32},(__m512i)(__v32hi){1,30000,3,-30000,5,6,7,-8,9,30000,11,-30000,13,14,15,-16,17,30000,19,-30000,21,22,23,-24,25,30000,27,-30000,29,30,31,-32}),0,-32768,0,32767,0,-12,0,16,0,-32768,0,32767,0,-28,0,32,0,-32768,0,32767,0,-44,0,48,0,-32768,0,32767,0,-60,0,64));
+
 __m512i test_mm512_subs_epu8(__m512i __A, __m512i __B) {
   // CHECK-LABEL: test_mm512_subs_epu8
   // CHECK-NOT: @llvm.x86.avx512.mask.psubus.b.512
   // CHECK: call <64 x i8> @llvm.usub.sat.v64i8(<64 x i8> %{{.*}}, <64 x i8> %{{.*}})
   return _mm512_subs_epu8(__A,__B);
 }
+TEST_CONSTEXPR(match_v64qu(_mm512_subs_epu8((__m512i)(__v64qu){0,0,0,0,0,0,0,0,+63,+63,+63,+63,+63,+63,+63,+63,+64,+64,+64,+64,+64,+64,+64,+64,+127,+127,+127,+127,+127,+127,+127,+127,+128,+128,+128,+128,+128,+128,+128,+128,+191,+191,+191,+191,+191,+191,+191,+191,+192,+192,+192,+192,+192,+192,+192,+192,+255,+255,+255,+255,+255,+255,+255,+255},(__m512i)(__v64qu){0,+63,+64,+127,+128,+191,+192,+255,0,+63,+64,+127,+128,+191,+192,+255,0,+63,+64,+127,+128,+191,+192,+255,0,+63,+64,+127,+128,+191,+192,+255,0,+63,+64,+127,+128,+191,+192,+255,0,+63,+64,+127,+128,+191,+192,+255,0,+63,+64,+127,+128,+191,+192,+255,0,+63,+64,+127,+128,+191,+192,+255}),0,0,0,0,0,0,0,0,+63,0,0,0,0,0,0,0,+64,+1,0,0,0,0,0,0,+127,+64,+63,0,0,0,0,0,+128,+65,+64,+1,0,0,0,0,+191,+128,+127,+64,+63,0,0,0,+192,+129,+128,+65,+64,+1,0,0,+255,+192,+191,+128,+127,+64,+63,+0));
+
 __m512i test_mm512_mask_subs_epu8(__m512i __W, __mmask64 __U, __m512i __A, __m512i __B) {
   // CHECK-LABEL: test_mm512_mask_subs_epu8
   // CHECK-NOT: @llvm.x86.avx512.mask.psubus.b.512
@@ -1677,7 +1721,7 @@ __m512i test_mm512_mask_subs_epu8(__m512i __W, __mmask64 __U, __m512i __A, __m51
   // CHECK: select <64 x i1> %{{.*}}, <64 x i8> %{{.*}}, <64 x i8> %{{.*}}
   return _mm512_mask_subs_epu8(__W,__U,__A,__B);
 }
-TEST_CONSTEXPR(match_v64qu(_mm512_subs_epu8((__m512i)(__v64qu){0, 0, 0, 0, 0, 0, 0, 0, +63, +63, +63, +63, +63, +63, +63, +63, +64, +64, +64, +64, +64, +64, +64, +64, +127, +127, +127, +127, +127, +127, +127, +127, +128, +128, +128, +128, +128, +128, +128, +128, +191, +191, +191, +191, +191, +191, +191, +191, +192, +192, +192, +192, +192, +192, +192, +192, +255, +255, +255, +255, +255, +255, +255, +255}, (__m512i)(__v64qu){0, +63, +64, +127, +128, +191, +192, +255, 0, +63, +64, +127, +128, +191, +192, +255, 0, +63, +64, +127, +128, +191, +192, +255, 0, +63, +64, +127, +128, +191, +192, +255, 0, +63, +64, +127, +128, +191, +192, +255, 0, +63, +64, +127, +128, +191, +192, +255, 0, +63, +64, +127, +128, +191, +192, +255, 0, +63, +64, +127, +128, +191, +192, +255}), 0, 0, 0, 0, 0, 0, 0, 0, +63, 0, 0, 0, 0, 0, 0, 0, +64, +1, 0, 0, 0, 0, 0, 0, +127, +64, +63, 0, 0, 0, 0, 0, +128, +65, +64, +1, 0, 0, 0, 0, +191, +128, +127, +64, +63, 0, 0, 0, +192, +129, +128, +65, +64, +1, 0, 0, +255, +192, +191, +128, +127, +64, +63, +0));
+TEST_CONSTEXPR(match_v64qu(_mm512_mask_subs_epu8((__m512i)(__v64qu){1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64},(__mmask64)0xAAAAAAAAAAAAAAAA,(__m512i)(__v64qu){0,250,0,128,0,20,0,255,0,0,0,1,0,100,0,255,0,250,0,128,0,20,0,255,0,0,0,1,0,100,0,255,0,250,0,128,0,20,0,255,0,0,0,1,0,100,0,255,0,250,0,128,0,20,0,255,0,0,0,1,0,100,0,255},(__m512i)(__v64qu){0,50,0,128,0,30,0,1,0,1,0,0,0,99,0,255,0,50,0,128,0,30,0,1,0,1,0,0,0,99,0,255,0,50,0,128,0,30,0,1,0,1,0,0,0,99,0,255,0,50,0,128,0,30,0,1,0,1,0,0,0,99,0,255}),1,200,3,0,5,0,7,254,9,0,11,1,13,1,15,0,17,200,19,0,21,0,23,254,25,0,27,1,29,1,31,0,33,200,35,0,37,0,39,254,41,0,43,1,45,1,47,0,49,200,51,0,53,0,55,254,57,0,59,1,61,1,63,0));
 
 __m512i test_mm512_maskz_subs_epu8(__mmask64 __U, __m512i __A, __m512i __B) {
   // CHECK-LABEL: test_mm512_maskz_subs_epu8
@@ -1686,20 +1730,25 @@ __m512i test_mm512_maskz_subs_epu8(__mmask64 __U, __m512i __A, __m512i __B) {
   // CHECK: select <64 x i1> %{{.*}}, <64 x i8> %{{.*}}, <64 x i8> %{{.*}}
   return _mm512_maskz_subs_epu8(__U,__A,__B);
 }
+TEST_CONSTEXPR(match_v64qu(_mm512_maskz_subs_epu8((__mmask64)0xAAAAAAAAAAAAAAAA,(__m512i)(__v64qu){0,250,0,128,0,20,0,255,0,0,0,1,0,100,0,255,0,250,0,128,0,20,0,255,0,0,0,1,0,100,0,255,0,250,0,128,0,20,0,255,0,0,0,1,0,100,0,255,0,250,0,128,0,20,0,255,0,0,0,1,0,100,0,255},(__m512i)(__v64qu){0,50,0,128,0,30,0,1,0,1,0,0,0,99,0,255,0,50,0,128,0,30,0,1,0,1,0,0,0,99,0,255,0,50,0,128,0,30,0,1,0,1,0,0,0,99,0,255,0,50,0,128,0,30,0,1,0,1,0,0,0,99,0,255}),0,200,0,0,0,0,0,254,0,0,0,1,0,1,0,0,0,200,0,0,0,0,0,254,0,0,0,1,0,1,0,0,0,200,0,0,0,0,0,254,0,0,0,1,0,1,0,0,0,200,0,0,0,0,0,254,0,0,0,1,0,1,0,0));
+
 __m512i test_mm512_subs_epu16(__m512i __A, __m512i __B) {
   // CHECK-LABEL: test_mm512_subs_epu16
   // CHECK-NOT: @llvm.x86.avx512.mask.psubus.w.512
   // CHECK: call <32 x i16> @llvm.usub.sat.v32i16(<32 x i16> %{{.*}}, <32 x i16> %{{.*}})
   return _mm512_subs_epu16(__A,__B);
 }
+TEST_CONSTEXPR(match_v32hu(_mm512_subs_epu16((__m512i)(__v32hu){0, 0, 0, 0, +16384, +16384, +16384, +16384, +16384, +16384, +32767, +32767, +32767, +32767, +32767, +32767, +32768, +32768, +32768, +32768, +32768, +32768, +49152, +49152, +49152, +49152, +49152, +49152, +65535, +65535, +65535, +65535}, (__m512i)(__v32hu){0, +32767, +32768, +65535, 0, +16384, +32767, +32768, +49152, +65535, 0, +16384, +32767, +32768, +49152, +65535, 0, +16384, +32767, +32768, +49152, +65535, 0, +16384, +32767, +32768, +49152, +65535, 0, +32767, +32768, +65535}), 0, 0, 0, 0, +16384, 0, 0, 0, 0, 0, +32767, +16383, 0, 0, 0, 0, +32768, +16384, +1, 0, 0, 0, +49152, +32768, +16385, +16384, 0, 0, +65535, +32768, +32767, 0));
+
 __m512i test_mm512_mask_subs_epu16(__m512i __W, __mmask32 __U, __m512i __A, __m512i __B) {
   // CHECK-LABEL: test_mm512_mask_subs_epu16
   // CHECK-NOT: @llvm.x86.avx512.mask.psubus.w.512
   // CHECK: call <32 x i16> @llvm.usub.sat.v32i16(<32 x i16> %{{.*}}, <32 x i16> %{{.*}})
   // CHECK: select <32 x i1> %{{.*}}, <32 x i16> %{{.*}}, <32 x i16> %{{.*}}
   return _mm512_mask_subs_epu16(__W,__U,__A,__B);
-TEST_CONSTEXPR(match_v32hu(_mm512_subs_epu16((__m512i)(__v32hu){0, 0, 0, 0, +16384, +16384, +16384, +16384, +16384, +16384, +32767, +32767, +32767, +32767, +32767, +32767, +32768, +32768, +32768, +32768, +32768, +32768, +49152, +49152, +49152, +49152, +49152, +49152, +65535, +65535, +65535, +65535}, (__m512i)(__v32hu){0, +32767, +32768, +65535, 0, +16384, +32767, +32768, +49152, +65535, 0, +16384, +32767, +32768, +49152, +65535, 0, +16384, +32767, +32768, +49152, +65535, 0, +16384, +32767, +32768, +49152, +65535, 0, +32767, +32768, +65535}), 0, 0, 0, 0, +16384, 0, 0, 0, 0, 0, +32767, +16383, 0, 0, 0, 0, +32768, +16384, +1, 0, 0, 0, +49152, +32768, +16385, +16384, 0, 0, +65535, +32768, +32767, 0));
 }
+TEST_CONSTEXPR(match_v32hu(_mm512_mask_subs_epu16((__m512i)(__v32hu){101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132},(__mmask32)0xAAAAAAAAu,(__m512i)(__v32hu){0,65000,0,40000,0,100,0,65535,0,0,0,1000,0,1,0,50000,0,65000,0,40000,0,100,0,65535,0,0,0,1000,0,1,0,50000},(__m512i)(__v32hu){0,5000,0,40000,0,200,0,1,0,1,0,65535,0,0,0,25000,0,5000,0,40000,0,200,0,1,0,1,0,65535,0,0,0,25000}),101,60000,103,0,105,0,107,65534,109,0,111,0,113,1,115,25000,117,60000,119,0,121,0,123,65534,125,0,127,0,129,1,131,25000));
+
 __m512i test_mm512_maskz_subs_epu16(__mmask32 __U, __m512i __A, __m512i __B) {
   // CHECK-LABEL: test_mm512_maskz_subs_epu16
   // CHECK-NOT: @llvm.x86.avx512.mask.psubus.w.512
@@ -1707,6 +1756,8 @@ __m512i test_mm512_maskz_subs_epu16(__mmask32 __U, __m512i __B) {
   // CHECK: select <32 x i1> %{{.*}}, <32 x i16> %{{.*}}, <32 x i16> %{{.*}}
   return _mm512_maskz_subs_epu16(__U,__A,__B);
 }
+TEST_CONSTEXPR(match_v32hu(_mm512_maskz_subs_epu16((__mmask32)0xAAAAAAAAu,(__m512i)(__v32hu){51,65000,0,40000,0,100,0,65535,42,0,0,1000,0,1,0,50000,69,65000,0,40000,0,100,0,65535,71,0,0,1000,0,1,0,50000},(__m512i)(__v32hu){2652,5000,0,40000,0,200,0,1,398,1,0,65535,0,0,0,25000,29625,5000,0,40000,0,200,0,1,25274,1,0,65535,0,0,0,25000}),0,60000,0,0,0,0,0,65534,0,0,0,0,0,1,0,25000,0,60000,0,0,0,0,0,65534,0,0,0,0,0,1,0,25000));
+
 __m512i test_mm512_mask2_permutex2var_epi16(__m512i __A, __m512i __I, __mmask32 __U, __m512i __B) {
   // CHECK-LABEL: test_mm512_mask2_permutex2var_epi16
   // CHECK: @llvm.x86.avx512.vpermi2var.hi.512
@@ -2041,6 +2092,7 @@ __m512i test_mm512_mask_unpackhi_epi8(__m512i __W, __mmask64 __U, __m512i __A, _
   // CHECK: select <64 x i1> %{{.*}}, <64 x i8> %{{.*}}, <64 x i8> %{{.*}}
   return _mm512_mask_unpackhi_epi8(__W, __U, __A, __B);
 }
+TEST_CONSTEXPR(match_v64qi(_mm512_mask_unpackhi_epi8((__m512i)(__v64qs){1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64},(__mmask64)0xFAAAAAAAAAAAAAAA,(__m512i)(__v64qs){100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,-128,-127,-126,-125,-124,-123,-122,-121,-120,-119,-118,-117,-116,-115,-114,-113,-112,-111,-110,-109,-108,-107,-106,-105,-104,-103,-102,-101,-100,-99,-98,-97,-96,-95,-94,-93,-92,-91,-90,-89},(__m512i)(__v64qs){-1,-2,-3,-4,-5,-6,-7,-8,-9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,-37,-38,-39,-40,-41,-42,-43,-44,-45,-46,-47,-48,-49,-50,-51,-52,-53,-54,-55,-56,-57,-58,-59,-60,-61,-62,-63,-64}),1,-9,3,-10,5,-11,7,-12,9,-13,11,-14,13,-15,15,-16,17,-25,19,-26,21,-27,23,-28,25,-29,27,-30,29,-31,31,-32,33,-41,35,-42,37,-43,39,-44,41,-45,43,-46,45,-47,47,-48,49,-57,51,-58,53,-59,55,-60,57,-61,59,-62,-90,-63,-89,-64));
 
 __m512i test_mm512_maskz_unpackhi_epi8(__mmask64 __U, __m512i __A, __m512i __B) {
   // CHECK-LABEL: test_mm512_maskz_unpackhi_epi8
@@ -2048,6 +2100,7 @@ __m512i test_mm512_maskz_unpackhi_epi8(__mmask64 __U, __m512i __A, __m512i __B)
   // CHECK: select <64 x i1> %{{.*}}, <64 x i8> %{{.*}}, <64 x i8> %{{.*}}
   return _mm512_maskz_unpackhi_epi8(__U, __A, __B);
 }
+TEST_CONSTEXPR(match_v64qi(_mm512_maskz_unpackhi_epi8((__mmask64)0xFAAAAAAAAAAAAAAA,(__m512i)(__v64qs){100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,-128,-127,-126,-125,-124,-123,-122,-121,-120,-119,-118,-117,-116,-115,-114,-113,-112,-111,-110,-109,-108,-107,-106,-105,-104,-103,-102,-101,-100,-99,-98,-97,-96,-95,-94,-93,-92,-91,-90,-89},(__m512i)(__v64qs){-1,-2,-3,-4,-5,-6,-7,-8,-9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,-37,-38,-39,-40,-41,-42,-43,-44,-45,-46,-47,-48,-49,-50,-51,-52,-53,-54,-55,-56,-57,-58,-59,-60,-61,-62,-63,-64}),0,-9,0,-10,0,-11,0,-12,0,-13,0,-14,0,-15,0,-16,0,-25,0,-26,0,-27,0,-28,0,-29,0,-30,0,-31,0,-32,0,-41,0,-42,0,-43,0,-44,0,-45,0,-46,0,-47,0,-48,0,-57,0,-58,0,-59,0,-60,0,-61,0,-62,-90,-63,-89,-64));
 
 __m512i test_mm512_unpackhi_epi16(__m512i __A, __m512i __B) {
   // CHECK-LABEL: test_mm512_unpackhi_epi16
@@ -2063,6 +2116,7 @@ __m512i test_mm512_mask_unpackhi_epi16(__m512i __W, __mmask32 __U, __m512i __A,
   // CHECK: select <32 x i1> %{{.*}}, <32 x i16> %{{.*}}, <32 x i16> %{{.*}}
return _mm512_mask_unpackhi_epi16(__W, __U, __A, __B); } +TEST_CONSTEXPR(match_v32hi(_mm512_mask_unpackhi_epi16((__m512i)(__v32hi){1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32},(__mmask32)0xFAAAAAAAu,(__m512i)(__v32hi){100,101,102,103,104,105,106,107,110,111,112,113,114,115,116,117,120,121,122,123,124,125,126,127,130,131,132,133,134,135,136,137},(__m512i)(__v32hi){200,201,202,203,204,205,206,207,210,211,212,213,214,215,216,217,220,221,222,223,224,225,226,227,230,231,232,233,234,235,236,237}),1,204,3,205,5,206,7,207,9,214,11,215,13,216,15,217,17,224,19,225,21,226,23,227,25,234,27,235,136,236,137,237)); __m512i test_mm512_maskz_unpackhi_epi16(__mmask32 __U, __m512i __A, __m512i __B) { // CHECK-LABEL: test_mm512_maskz_unpackhi_epi16 @@ -2070,6 +2124,7 @@ __m512i test_mm512_maskz_unpackhi_epi16(__mmask32 __U, __m512i __A, __m512i __B) // CHECK: select <32 x i1> %{{.*}}, <32 x i16> %{{.*}}, <32 x i16> %{{.*}} return _mm512_maskz_unpackhi_epi16(__U, __A, __B); } +TEST_CONSTEXPR(match_v32hi(_mm512_maskz_unpackhi_epi16((__mmask32)0xFAAAAAAAu,(__m512i)(__v32hi){100,101,102,103,104,105,106,107,110,111,112,113,114,115,116,117,120,121,122,123,124,125,126,127,130,131,132,133,134,135,136,137},(__m512i)(__v32hi){200,201,202,203,204,205,206,207,210,211,212,213,214,215,216,217,220,221,222,223,224,225,226,227,230,231,232,233,234,235,236,237}),0,204,0,205,0,206,0,207,0,214,0,215,0,216,0,217,0,224,0,225,0,226,0,227,0,234,0,235,136,236,137,237)); __m512i test_mm512_unpacklo_epi8(__m512i __A, __m512i __B) { // CHECK-LABEL: test_mm512_unpacklo_epi8 @@ -2084,6 +2139,7 @@ __m512i test_mm512_mask_unpacklo_epi8(__m512i __W, __mmask64 __U, __m512i __A, _ // CHECK: select <64 x i1> %{{.*}}, <64 x i8> %{{.*}}, <64 x i8> %{{.*}} return _mm512_mask_unpacklo_epi8(__W, __U, __A, __B); } +TEST_CONSTEXPR(match_v64qi(_mm512_mask_unpacklo_epi8((__m512i)(__v64qs){1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64},(__mmask64)0xFAAAAAAAAAAAAAAA,(__m512i)(__v64qs){100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-30,-31,-32,-33,-34,-35,-36,-37,-38,-39,-40,-41,-42,-43,-44,-45,-50,-51,-52,-53,-54,-55,-56,-57,-58,-59,-60,-61,-62,-63,-64,-65},(__m512i)(__v64qs){-1,-2,-3,-4,-5,-6,-7,-8,-9,-10,-11,-12,-13,-14,-15,-16,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75}),1,-1,3,-2,5,-3,7,-4,9,-5,11,-6,13,-7,15,-8,17,20,19,21,21,22,23,23,25,24,27,25,29,26,31,27,33,40,35,41,37,42,39,43,41,44,43,45,45,46,47,47,49,60,51,61,53,62,55,63,57,64,59,65,-56,66,-57,67)); __m512i test_mm512_maskz_unpacklo_epi8(__mmask64 __U, __m512i __A, __m512i __B) { // CHECK-LABEL: test_mm512_maskz_unpacklo_epi8 @@ -2091,6 +2147,7 @@ __m512i test_mm512_maskz_unpacklo_epi8(__mmask64 __U, __m512i __A, __m512i __B) // CHECK: select <64 x i1> %{{.*}}, <64 x i8> %{{.*}}, <64 x i8> %{{.*}} return _mm512_maskz_unpacklo_epi8(__U, __A, __B); } 
+TEST_CONSTEXPR(match_v64qi(_mm512_maskz_unpacklo_epi8((__mmask64)0xFAAAAAAAAAAAAAAA,(__m512i)(__v64qs){100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-30,-31,-32,-33,-34,-35,-36,-37,-38,-39,-40,-41,-42,-43,-44,-45,-50,-51,-52,-53,-54,-55,-56,-57,-58,-59,-60,-61,-62,-63,-64,-65},(__m512i)(__v64qs){-1,-2,-3,-4,-5,-6,-7,-8,-9,-10,-11,-12,-13,-14,-15,-16,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75}),0,-1,0,-2,0,-3,0,-4,0,-5,0,-6,0,-7,0,-8,0,20,0,21,0,22,0,23,0,24,0,25,0,26,0,27,0,40,0,41,0,42,0,43,0,44,0,45,0,46,0,47,0,60,0,61,0,62,0,63,0,64,0,65,-56,66,-57,67)); __m512i test_mm512_unpacklo_epi16(__m512i __A, __m512i __B) { // CHECK-LABEL: test_mm512_unpacklo_epi16 @@ -2105,6 +2162,7 @@ __m512i test_mm512_mask_unpacklo_epi16(__m512i __W, __mmask32 __U, __m512i __A, // CHECK: select <32 x i1> %{{.*}}, <32 x i16> %{{.*}}, <32 x i16> %{{.*}} return _mm512_mask_unpacklo_epi16(__W, __U, __A, __B); } +TEST_CONSTEXPR(match_v32hi(_mm512_mask_unpacklo_epi16((__m512i)(__v32hi){1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32},(__mmask32)0xFAAAAAAAu,(__m512i)(__v32hi){100,101,102,103,104,105,106,107,110,111,112,113,114,115,116,117,120,121,122,123,124,125,126,127,130,131,132,133,134,135,136,137},(__m512i)(__v32hi){200,201,202,203,204,205,206,207,210,211,212,213,214,215,216,217,220,221,222,223,224,225,226,227,230,231,232,233,234,235,236,237}),1,200,3,201,5,202,7,203,9,210,11,211,13,212,15,213,17,220,19,221,21,222,23,223,25,230,27,231,132,232,133,233)); __m512i test_mm512_maskz_unpacklo_epi16(__mmask32 __U, __m512i __A, __m512i __B) { // CHECK-LABEL: test_mm512_maskz_unpacklo_epi16 @@ -2112,6 +2170,7 @@ __m512i test_mm512_maskz_unpacklo_epi16(__mmask32 __U, __m512i __A, __m512i __B) // CHECK: select <32 x i1> %{{.*}}, <32 x i16> %{{.*}}, <32 x i16> %{{.*}} return _mm512_maskz_unpacklo_epi16(__U, __A, __B); } +TEST_CONSTEXPR(match_v32hi(_mm512_maskz_unpacklo_epi16((__mmask32)0xFAAAAAAAu,(__m512i)(__v32hi){100,101,102,103,104,105,106,107,110,111,112,113,114,115,116,117,120,121,122,123,124,125,126,127,130,131,132,133,134,135,136,137},(__m512i)(__v32hi){200,201,202,203,204,205,206,207,210,211,212,213,214,215,216,217,220,221,222,223,224,225,226,227,230,231,232,233,234,235,236,237}),0,200,0,201,0,202,0,203,0,210,0,211,0,212,0,213,0,220,0,221,0,222,0,223,0,230,0,231,132,232,133,233)); __m512i test_mm512_cvtepi8_epi16(__m256i __A) { // CHECK-LABEL: test_mm512_cvtepi8_epi16 @@ -2499,24 +2558,28 @@ __m512i test_mm512_mask_mov_epi16(__m512i __W, __mmask32 __U, __m512i __A) { // CHECK: select <32 x i1> %{{.*}}, <32 x i16> %{{.*}}, <32 x i16> %{{.*}} return _mm512_mask_mov_epi16(__W, __U, __A); } +TEST_CONSTEXPR(match_v32hi(_mm512_mask_mov_epi16((__m512i)(__v32hi){0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31},(__mmask32)0xAAAAAAAA,(__m512i)(__v32hi){-0,-1,-2,-3,-4,-5,-6,-7,-8,-9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31}),0,-1,2,-3,4,-5,6,-7,8,-9,10,-11,12,-13,14,-15,16,-17,18,-19,20,-21,22,-23,24,-25,26,-27,28,-29,30,-31)); __m512i test_mm512_maskz_mov_epi16(__mmask32 __U, __m512i __A) { // CHECK-LABEL: test_mm512_maskz_mov_epi16 // CHECK: select <32 x i1> %{{.*}}, <32 x i16> %{{.*}}, <32 x i16> %{{.*}} return _mm512_maskz_mov_epi16(__U, __A); } 
+TEST_CONSTEXPR(match_v32hi(_mm512_maskz_mov_epi16((__mmask32)0xAAAAAAAA,(__m512i)(__v32hi){-0,-1,-2,-3,-4,-5,-6,-7,-8,-9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31}),0,-1,0,-3,0,-5,0,-7,0,-9,0,-11,0,-13,0,-15,0,-17,0,-19,0,-21,0,-23,0,-25,0,-27,0,-29,0,-31)); __m512i test_mm512_mask_mov_epi8(__m512i __W, __mmask64 __U, __m512i __A) { // CHECK-LABEL: test_mm512_mask_mov_epi8 // CHECK: select <64 x i1> %{{.*}}, <64 x i8> %{{.*}}, <64 x i8> %{{.*}} return _mm512_mask_mov_epi8(__W, __U, __A); } +TEST_CONSTEXPR(match_v64qi(_mm512_mask_mov_epi8((__m512i)(__v64qs){0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63},(__mmask64)0xAAAAAAAAAAAAAAAA,(__m512i)(__v64qs){-0,-1,-2,-3,-4,-5,-6,-7,-8,-9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,-37,-38,-39,-40,-41,-42,-43,-44,-45,-46,-47,-48,-49,-50,-51,-52,-53,-54,-55,-56,-57,-58,-59,-60,-61,-62,-63}),0,-1,2,-3,4,-5,6,-7,8,-9,10,-11,12,-13,14,-15,16,-17,18,-19,20,-21,22,-23,24,-25,26,-27,28,-29,30,-31,32,-33,34,-35,36,-37,38,-39,40,-41,42,-43,44,-45,46,-47,48,-49,50,-51,52,-53,54,-55,56,-57,58,-59,60,-61,62,-63)); __m512i test_mm512_maskz_mov_epi8(__mmask64 __U, __m512i __A) { // CHECK-LABEL: test_mm512_maskz_mov_epi8 // CHECK: select <64 x i1> %{{.*}}, <64 x i8> %{{.*}}, <64 x i8> %{{.*}} return _mm512_maskz_mov_epi8(__U, __A); } +TEST_CONSTEXPR(match_v64qi(_mm512_maskz_mov_epi8((__mmask64)0xAAAAAAAAAAAAAAAA,(__m512i)(__v64qs){-0,-1,-2,-3,-4,-5,-6,-7,-8,-9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,-33,-34,-35,-36,-37,-38,-39,-40,-41,-42,-43,-44,-45,-46,-47,-48,-49,-50,-51,-52,-53,-54,-55,-56,-57,-58,-59,-60,-61,-62,-63}),0,-1,0,-3,0,-5,0,-7,0,-9,0,-11,0,-13,0,-15,0,-17,0,-19,0,-21,0,-23,0,-25,0,-27,0,-29,0,-31,0,-33,0,-35,0,-37,0,-39,0,-41,0,-43,0,-45,0,-47,0,-49,0,-51,0,-53,0,-55,0,-57,0,-59,0,-61,0,-63)); __m512i test_mm512_mask_set1_epi8(__m512i __O, __mmask64 __M, char __A) { // CHECK-LABEL: test_mm512_mask_set1_epi8 @@ -2585,6 +2648,7 @@ __m512i test_mm512_mask_set1_epi8(__m512i __O, __mmask64 __M, char __A) { // CHECK: select <64 x i1> %{{.*}}, <64 x i8> %{{.*}}, <64 x i8> %{{.*}} return _mm512_mask_set1_epi8(__O, __M, __A); } +TEST_CONSTEXPR(match_v64qi(_mm512_mask_set1_epi8((__m512i)(__v64qi){1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64},(__mmask64)0xAAAAAAAAAAAAAAAA,(char)42),1,42,3,42,5,42,7,42,9,42,11,42,13,42,15,42,17,42,19,42,21,42,23,42,25,42,27,42,29,42,31,42,33,42,35,42,37,42,39,42,41,42,43,42,45,42,47,42,49,42,51,42,53,42,55,42,57,42,59,42,61,42,63,42)); __m512i test_mm512_maskz_set1_epi8(__mmask64 __M, char __A) { // CHECK-LABEL: test_mm512_maskz_set1_epi8 @@ -2655,6 +2719,7 @@ __m512i test_mm512_maskz_set1_epi8(__mmask64 __M, char __A) { // CHECK: select <64 x i1> %{{.*}}, <64 x i8> %{{.*}}, <64 x i8> %{{.*}} return _mm512_maskz_set1_epi8(__M, __A); } +TEST_CONSTEXPR(match_v64qi(_mm512_maskz_set1_epi8((__mmask64)0xAAAAAAAAAAAAAAAA,(char)42),0,42,0,42,0,42,0,42,0,42,0,42,0,42,0,42,0,42,0,42,0,42,0,42,0,42,0,42,0,42,0,42,0,42,0,42,0,42,0,42,0,42,0,42,0,42,0,42,0,42,0,42,0,42,0,42,0,42,0,42,0,42,0,42)); __mmask64 test_mm512_kunpackd(__m512i __A, __m512i __B, __m512i __C, __m512i __D, __m512i __E, __m512i __F) { // 
CHECK-LABEL: test_mm512_kunpackd @@ -2830,6 +2895,7 @@ __m512i test_mm512_mask_broadcastb_epi8(__m512i __O, __mmask64 __M, __m128i __A) // CHECK: select <64 x i1> %{{.*}}, <64 x i8> %{{.*}}, <64 x i8> %{{.*}} return _mm512_mask_broadcastb_epi8(__O, __M, __A); } +TEST_CONSTEXPR(match_v64qi(_mm512_mask_broadcastb_epi8((__m512i)(__v64qs){0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63},(__mmask64)0xAAAAAAAAAAAAAAAA,(__m128i)(__v16qs){-120,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15}),0,-120,2,-120,4,-120,6,-120,8,-120,10,-120,12,-120,14,-120,16,-120,18,-120,20,-120,22,-120,24,-120,26,-120,28,-120,30,-120,32,-120,34,-120,36,-120,38,-120,40,-120,42,-120,44,-120,46,-120,48,-120,50,-120,52,-120,54,-120,56,-120,58,-120,60,-120,62,-120)); __m512i test_mm512_maskz_broadcastb_epi8(__mmask64 __M, __m128i __A) { // CHECK-LABEL: test_mm512_maskz_broadcastb_epi8 @@ -2837,6 +2903,7 @@ __m512i test_mm512_maskz_broadcastb_epi8(__mmask64 __M, __m128i __A) { // CHECK: select <64 x i1> %{{.*}}, <64 x i8> %{{.*}}, <64 x i8> %{{.*}} return _mm512_maskz_broadcastb_epi8(__M, __A); } +TEST_CONSTEXPR(match_v64qi(_mm512_maskz_broadcastb_epi8((__mmask64)0xAAAAAAAAAAAAAAAA,(__m128i)(__v16qs){-120,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15}),0,-120,0,-120,0,-120,0,-120,0,-120,0,-120,0,-120,0,-120,0,-120,0,-120,0,-120,0,-120,0,-120,0,-120,0,-120,0,-120,0,-120,0,-120,0,-120,0,-120,0,-120,0,-120,0,-120,0,-120,0,-120,0,-120,0,-120,0,-120,0,-120,0,-120,0,-120,0,-120)); __m512i test_mm512_broadcastw_epi16(__m128i __A) { // CHECK-LABEL: test_mm512_broadcastw_epi16 @@ -2878,6 +2945,7 @@ __m512i test_mm512_mask_broadcastw_epi16(__m512i __O, __mmask32 __M, __m128i __A // CHECK: select <32 x i1> %{{.*}}, <32 x i16> %{{.*}}, <32 x i16> %{{.*}} return _mm512_mask_broadcastw_epi16(__O, __M, __A); } +TEST_CONSTEXPR(match_v32hi(_mm512_mask_broadcastw_epi16((__m512i)(__v32hi){0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31},(__mmask32)0xAAAAAAAA,(__m128i)(__v8hi){-120,1,2,3,4,5,6,7}),0,-120,2,-120,4,-120,6,-120,8,-120,10,-120,12,-120,14,-120,16,-120,18,-120,20,-120,22,-120,24,-120,26,-120,28,-120,30,-120)); __m512i test_mm512_maskz_broadcastw_epi16(__mmask32 __M, __m128i __A) { // CHECK-LABEL: test_mm512_maskz_broadcastw_epi16 @@ -2885,6 +2953,7 @@ __m512i test_mm512_maskz_broadcastw_epi16(__mmask32 __M, __m128i __A) { // CHECK: select <32 x i1> %{{.*}}, <32 x i16> %{{.*}}, <32 x i16> %{{.*}} return _mm512_maskz_broadcastw_epi16(__M, __A); } +TEST_CONSTEXPR(match_v32hi(_mm512_maskz_broadcastw_epi16((__mmask32)0xAAAAAAAAu,(__m128i)(__v8hi){-120,1,2,3,4,5,6,7}),0,-120,0,-120,0,-120,0,-120,0,-120,0,-120,0,-120,0,-120,0,-120,0,-120,0,-120,0,-120,0,-120,0,-120,0,-120,0,-120)); __m512i test_mm512_mask_set1_epi16(__m512i __O, __mmask32 __M, short __A) { // CHECK-LABEL: test_mm512_mask_set1_epi16 @@ -2923,6 +2992,7 @@ __m512i test_mm512_mask_set1_epi16(__m512i __O, __mmask32 __M, short __A) { // CHECK: select <32 x i1> %{{.*}}, <32 x i16> %{{.*}}, <32 x i16> %{{.*}} return _mm512_mask_set1_epi16(__O, __M, __A); } +TEST_CONSTEXPR(match_v32hi(_mm512_mask_set1_epi16((__m512i)(__v32hi){1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32},(__mmask32)0xAAAAAAAA,-1),1,-1,3,-1,5,-1,7,-1,9,-1,11,-1,13,-1,15,-1,17,-1,19,-1,21,-1,23,-1,25,-1,27,-1,29,-1,31,-1)); __m512i test_mm512_maskz_set1_epi16(__mmask32 __M, short __A) { // CHECK-LABEL: 
   // CHECK-LABEL: test_mm512_maskz_set1_epi16
@@ -2961,6 +3031,8 @@ __m512i test_mm512_maskz_set1_epi16(__mmask32 __M, short __A) {
   // CHECK: select <32 x i1> %{{.*}}, <32 x i16> %{{.*}}, <32 x i16> %{{.*}}
   return _mm512_maskz_set1_epi16(__M, __A);
 }
+TEST_CONSTEXPR(match_v32hi(_mm512_maskz_set1_epi16((__mmask32)0xAAAAAAAA,42),0,42,0,42,0,42,0,42,0,42,0,42,0,42,0,42,0,42,0,42,0,42,0,42,0,42,0,42,0,42,0,42));
+
 __m512i test_mm512_permutexvar_epi16(__m512i __A, __m512i __B) {
   // CHECK-LABEL: test_mm512_permutexvar_epi16
   // CHECK: @llvm.x86.avx512.permvar.hi.512
diff --git a/clang/test/CodeGen/X86/avx512vlbw-builtins.c b/clang/test/CodeGen/X86/avx512vlbw-builtins.c
index 172a3cb219c8a..28e6afbc24564 100644
--- a/clang/test/CodeGen/X86/avx512vlbw-builtins.c
+++ b/clang/test/CodeGen/X86/avx512vlbw-builtins.c
@@ -941,56 +941,28 @@ __m128i test_mm_mask_blend_epi8(__mmask16 __U, __m128i __A, __m128i __W) {
   // CHECK: select <16 x i1> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}
   return _mm_mask_blend_epi8(__U,__A,__W);
 }
-TEST_CONSTEXPR(match_v16qi(
-    _mm_mask_blend_epi8(
-        (__mmask16)0x0001,
-        (__m128i)(__v16qi){2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2},
-        (__m128i)(__v16qi){ 10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25 }
-    ),
-    10, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2
-));
+TEST_CONSTEXPR(match_v16qi(_mm_mask_blend_epi8((__mmask16)0x0001,(__m128i)(__v16qi){2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2},(__m128i)(__v16qi){10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25}),10,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2));
 
 __m256i test_mm256_mask_blend_epi8(__mmask32 __U, __m256i __A, __m256i __W) {
   // CHECK-LABEL: test_mm256_mask_blend_epi8
   // CHECK: select <32 x i1> %{{.*}}, <32 x i8> %{{.*}}, <32 x i8> %{{.*}}
   return _mm256_mask_blend_epi8(__U,__A,__W);
 }
-TEST_CONSTEXPR(match_v32qi(
-    _mm256_mask_blend_epi8(
-        (__mmask32) 0x00000001,
-        (__m256i)(__v32qi) {2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2},
-        (__m256i)(__v32qi){ 10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25}
-    ),
-    10, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2
-));
+TEST_CONSTEXPR(match_v32qi(_mm256_mask_blend_epi8((__mmask32)0x00000001,(__m256i)(__v32qi){2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2},(__m256i)(__v32qi){10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25}),10,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2));
 
 __m128i test_mm_mask_blend_epi16(__mmask8 __U, __m128i __A, __m128i __W) {
   // CHECK-LABEL: test_mm_mask_blend_epi16
   // CHECK: select <8 x i1> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}
   return _mm_mask_blend_epi16(__U,__A,__W);
 }
-TEST_CONSTEXPR(match_v8hi(
-    _mm_mask_blend_epi16(
-        (__mmask8)0x01,
-        (__m128i)(__v8hi){2, 2, 2, 2, 2, 2, 2, 2},
-        (__m128i)(__v8hi){ 10,11,12,13,14,15,16,17 }
-    ),
-    10, 2, 2, 2, 2, 2, 2, 2
-));
+TEST_CONSTEXPR(match_v8hi(_mm_mask_blend_epi16((__mmask8)0x01,(__m128i)(__v8hi){2,2,2,2,2,2,2,2},(__m128i)(__v8hi){10,11,12,13,14,15,16,17}),10,2,2,2,2,2,2,2));
 
 __m256i test_mm256_mask_blend_epi16(__mmask16 __U, __m256i __A, __m256i __W) {
   // CHECK-LABEL: test_mm256_mask_blend_epi16
   // CHECK: select <16 x i1> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}}
   return _mm256_mask_blend_epi16(__U,__A,__W);
 }
-TEST_CONSTEXPR(match_v16hi(
-    _mm256_mask_blend_epi16(
-        (__mmask16)0x0001,
-        (__m256i)(__v16hi){2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2},
-        (__m256i)(__v16hi){ 10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25 }
-    ),
-    10, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2
-));
+TEST_CONSTEXPR(match_v16hi(_mm256_mask_blend_epi16((__mmask16)0x0001,(__m256i)(__v16hi){2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2},(__m256i)(__v16hi){10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25}),10,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2));
 
 __m128i test_mm_mask_abs_epi8(__m128i __W, __mmask16 __U, __m128i __A) {
   // CHECK-LABEL: test_mm_mask_abs_epi8
@@ -1078,48 +1050,63 @@ __m128i test_mm_maskz_packs_epi32(__mmask8 __M, __m128i __A, __m128i __B) {
   // CHECK: select <8 x i1> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}
   return _mm_maskz_packs_epi32(__M,__A,__B);
 }
+TEST_CONSTEXPR(match_v8hi(_mm_maskz_packs_epi32((__mmask8)0xAA,(__m128i)(__v4si){40000,-50000,65535,-65536},(__m128i)(__v4si){0,50000,40000,-40000}),0,-32768,0,-32768,0,32767,0,-32768));
+
 __m128i test_mm_mask_packs_epi32(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B) {
   // CHECK-LABEL: test_mm_mask_packs_epi32
   // CHECK: @llvm.x86.sse2.packssdw
   // CHECK: select <8 x i1> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}
   return _mm_mask_packs_epi32(__W,__M,__A,__B);
 }
+TEST_CONSTEXPR(match_v8hi(_mm_mask_packs_epi32((__m128i)(__v8hi){1,2,3,4,29,30,31,32},(__mmask8)0xAA,(__m128i)(__v4si){40000,-50000,65535,-65536},(__m128i)(__v4si){0,50000,40000,-40000}),1,-32768,3,-32768,29,32767,31,-32768));
+
 __m256i test_mm256_maskz_packs_epi32(__mmask16 __M, __m256i __A, __m256i __B) {
   // CHECK-LABEL: test_mm256_maskz_packs_epi32
   // CHECK: @llvm.x86.avx2.packssdw
   // CHECK: select <16 x i1> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}}
   return _mm256_maskz_packs_epi32(__M,__A,__B);
 }
+TEST_CONSTEXPR(match_v16hi(_mm256_maskz_packs_epi32((__mmask16)0xAAAA,(__m256i)(__v8si){40000,-50000,32767,-32768,32768,-32769,65535,-65536},(__m256i)(__v8si){0,1,-1,65536,22222,-22222,40000,-40000}),0,-32768,0,-32768,0,1,0,32767,0,-32768,0,-32768,0,-22222,0,-32768));
+
 __m256i test_mm256_mask_packs_epi32(__m256i __W, __mmask16 __M, __m256i __A, __m256i __B) {
   // CHECK-LABEL: test_mm256_mask_packs_epi32
   // CHECK: @llvm.x86.avx2.packssdw
   // CHECK: select <16 x i1> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}}
   return _mm256_mask_packs_epi32(__W,__M,__A,__B);
 }
+TEST_CONSTEXPR(match_v16hi(_mm256_mask_packs_epi32((__m256i)(__v16hi){1,2,3,4,5,6,7,8,25,26,27,28,29,30,31,32},(__mmask16)0xAAAA,(__m256i)(__v8si){40000,-50000,32767,-32768,32768,-32769,65535,-65536},(__m256i)(__v8si){0,1,-1,65536,22222,-22222,40000,-40000}),1,-32768,3,-32768,5,1,7,32767,25,-32768,27,-32768,29,-22222,31,-32768));
+
 __m128i test_mm_maskz_packs_epi16(__mmask16 __M, __m128i __A, __m128i __B) {
   // CHECK-LABEL: test_mm_maskz_packs_epi16
   // CHECK: @llvm.x86.sse2.packsswb
   // CHECK: select <16 x i1> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}
   return _mm_maskz_packs_epi16(__M,__A,__B);
 }
+TEST_CONSTEXPR(match_v16qi(_mm_maskz_packs_epi16((__mmask16)0xAAAA,(__m128i)(__v8hi){130,-200,127,-128,255,-255,127,-128},(__m128i)(__v8hi){0,1,-1,255,-128,90,-90,-32768}),0,-128,0,-128,0,-128,0,-128,0,1,0,127,0,90,0,-128));
+
 __m128i test_mm_mask_packs_epi16(__m128i __W, __mmask16 __M, __m128i __A, __m128i __B) {
   // CHECK-LABEL: test_mm_mask_packs_epi16
   // CHECK: @llvm.x86.sse2.packsswb
   // CHECK: select <16 x i1> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}
   return _mm_mask_packs_epi16(__W,__M,__A,__B);
 }
+TEST_CONSTEXPR(match_v16qi(_mm_mask_packs_epi16((__m128i)(__v16qi){1,2,3,4,5,6,7,8,57,58,59,60,61,62,63,64},(__mmask16)0xAAAA,(__m128i)(__v8hi){130,-200,127,-128,255,-255,127,-128},(__m128i)(__v8hi){0,1,-1,255,-128,90,-90,-32768}),1,-128,3,-128,5,-128,7,-128,57,1,59,127,61,90,63,-128));
+
 __m256i test_mm256_maskz_packs_epi16(__mmask32 __M, __m256i __A, __m256i __B) {
   // CHECK-LABEL: test_mm256_maskz_packs_epi16
   // CHECK: @llvm.x86.avx2.packsswb
   // CHECK: select <32 x i1> %{{.*}}, <32 x i8> %{{.*}}, <32 x i8> %{{.*}}
   return _mm256_maskz_packs_epi16(__M,__A,__B);
 }
+TEST_CONSTEXPR(match_v32qi(_mm256_maskz_packs_epi16((__mmask32)0xAAAAAAAA,(__m256i)(__v16hi){130,-200,127,-128,300,-1000,42,-42,500,-500,7,-7,255,-255,127,-128},(__m256i)(__v16hi){0,1,-1,255,-129,128,20000,-32768,0,1,-1,127,-128,90,-90,-32768}),0,-128,0,-128,0,-128,0,-42,0,1,0,127,0,127,0,-128,0,-128,0,-7,0,-128,0,-128,0,1,0,127,0,90,0,-128));
+
 __m256i test_mm256_mask_packs_epi16(__m256i __W, __mmask32 __M, __m256i __A, __m256i __B) {
   // CHECK-LABEL: test_mm256_mask_packs_epi16
   // CHECK: @llvm.x86.avx2.packsswb
   // CHECK: select <32 x i1> %{{.*}}, <32 x i8> %{{.*}}, <32 x i8> %{{.*}}
   return _mm256_mask_packs_epi16(__W,__M,__A,__B);
 }
+TEST_CONSTEXPR(match_v32qi(_mm256_mask_packs_epi16((__m256i)(__v32qs){1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64},(__mmask32)0xAAAAAAAA,(__m256i)(__v16hi){130,-200,127,-128,300,-1000,42,-42,500,-500,7,-7,255,-255,127,-128},(__m256i)(__v16hi){0,1,-1,255,-129,128,20000,-32768,0,1,-1,127,-128,90,-90,-32768}),1,-128,3,-128,5,-128,7,-42,9,1,11,127,13,127,15,-128,49,-128,51,-7,53,-128,55,-128,57,1,59,127,61,90,63,-128));
 
 __m128i test_mm_mask_packus_epi32(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B) {
   // CHECK-LABEL: test_mm_mask_packus_epi32
@@ -1127,6 +1114,7 @@ __m128i test_mm_mask_packus_epi32(__m128i __W, __mmask8 __M, __m128i __A, __m128
   // CHECK: select <8 x i1> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}
   return _mm_mask_packus_epi32(__W,__M,__A,__B);
 }
+TEST_CONSTEXPR(match_v8hu(_mm_mask_packus_epi32((__m128i)(__v8hu){1,2,3,4,5,6,7,8},(__mmask8)0xAA,(__m128i)(__v4si){40000,-50000,32767,-32768},(__m128i)(__v4si){0,1,-1,65536}),1,0,3,0,5,1,7,65535));
 
 __m128i test_mm_maskz_packus_epi32(__mmask8 __M, __m128i __A, __m128i __B) {
   // CHECK-LABEL: test_mm_maskz_packus_epi32
@@ -1134,6 +1122,7 @@ __m128i test_mm_maskz_packus_epi32(__mmask8 __M, __m128i __A, __m128i __B) {
   // CHECK: select <8 x i1> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}
   return _mm_maskz_packus_epi32(__M,__A,__B);
 }
+TEST_CONSTEXPR(match_v8hu(_mm_maskz_packus_epi32((__mmask8)0xAA,(__m128i)(__v4si){40000,-50000,32767,-32768},(__m128i)(__v4si){0,1,-1,65536}),0,0,0,0,0,1,0,65535));
 
 __m256i test_mm256_maskz_packus_epi32(__mmask16 __M, __m256i __A, __m256i __B) {
   // CHECK-LABEL: test_mm256_maskz_packus_epi32
@@ -1141,6 +1130,7 @@ __m256i test_mm256_maskz_packus_epi32(__mmask16 __M, __m256i __A, __m256i __B) {
   // CHECK: select <16 x i1> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}}
   return _mm256_maskz_packus_epi32(__M,__A,__B);
 }
+TEST_CONSTEXPR(match_v16hi(_mm256_maskz_packus_epi32((__mmask16)0xAAAA,(__m256i)(__v8si){40000,-50000,32767,-32768,32768,-32769,22222,-22222},(__m256i)(__v8si){0,1,-1,65536,40000,-40000,65535,0}),0,0,0,0,0,1,0,-1,0,0,0,0,0,0,0,0));
 
 __m256i test_mm256_mask_packus_epi32(__m256i __W, __mmask16 __M, __m256i __A, __m256i __B) {
   // CHECK-LABEL: test_mm256_mask_packus_epi32
@@ -1148,6 +1138,7 @@ __m256i test_mm256_mask_packus_epi32(__m256i __W, __mmask16 __M, __m256i __A, __
   // CHECK: select <16 x i1> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}}
   return _mm256_mask_packus_epi32(__W,__M,__A,__B);
 }
+TEST_CONSTEXPR(match_v16hi(_mm256_mask_packus_epi32((__m256i)(__v16hi){1,2,3,4,5,6,7,8,25,26,27,28,29,30,31,32},(__mmask16)0xAAAA,(__m256i)(__v8si){40000,-50000,32767,-32768,32768,-32769,22222,-22222},(__m256i)(__v8si){0,1,-1,65536,40000,-40000,65535,0}),1,0,3,0,5,1,7,-1,25,0,27,0,29,0,31,0));
 
 __m128i test_mm_maskz_packus_epi16(__mmask16 __M, __m128i __A, __m128i __B) {
   // CHECK-LABEL: test_mm_maskz_packus_epi16
@@ -1155,6 +1146,7 @@ __m128i test_mm_maskz_packus_epi16(__mmask16 __M, __m128i __A, __m128i __B) {
   // CHECK: select <16 x i1> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}
   return _mm_maskz_packus_epi16(__M,__A,__B);
 }
+TEST_CONSTEXPR(match_v16qu(_mm_maskz_packus_epi16((__mmask16)0xAAAA,(__m128i)(__v8hi){-1,0,1,127,128,255,256,-200},(__m128i)(__v8hi){0,1,-1,255,-129,128,20000,-32768}),0,0,0,127,0,255,0,0,0,1,0,255,0,128,0,0));
 
 __m128i test_mm_mask_packus_epi16(__m128i __W, __mmask16 __M, __m128i __A, __m128i __B) {
   // CHECK-LABEL: test_mm_mask_packus_epi16
@@ -1162,6 +1154,7 @@ __m128i test_mm_mask_packus_epi16(__m128i __W, __mmask16 __M, __m128i __A, __m12
   // CHECK: select <16 x i1> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}
   return _mm_mask_packus_epi16(__W,__M,__A,__B);
 }
+TEST_CONSTEXPR(match_v16qu(_mm_mask_packus_epi16((__m128i)(__v16qu){1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16},(__mmask16)0xAAAA,(__m128i)(__v8hi){-1,0,1,127,128,255,256,-200},(__m128i)(__v8hi){0,1,-1,255,-129,128,20000,-32768}),1,0,3,127,5,255,7,0,9,1,11,255,13,128,15,0));
 
 __m256i test_mm256_maskz_packus_epi16(__mmask32 __M, __m256i __A, __m256i __B) {
   // CHECK-LABEL: test_mm256_maskz_packus_epi16
@@ -1169,6 +1162,7 @@ __m256i test_mm256_maskz_packus_epi16(__mmask32 __M, __m256i __A, __m256i __B) {
   // CHECK: select <32 x i1> %{{.*}}, <32 x i8> %{{.*}}, <32 x i8> %{{.*}}
   return _mm256_maskz_packus_epi16(__M,__A,__B);
 }
+TEST_CONSTEXPR(match_v32qi(_mm256_maskz_packus_epi16((__mmask32)0xAAAAAAAA,(__m256i)(__v16hi){-1,0,1,127,128,255,256,-200,300,42,-42,500,20000,-32768,129,-129},(__m256i)(__v16hi){0,1,-1,255,-129,128,20000,-32768,32767,-32767,127,-128,30000,-30000,90,-90}),0,0,0,127,0,-1,0,0,0,1,0,-1,0,-128,0,0,0,42,0,-1,0,0,0,0,0,0,0,0,0,0,0,0));
 
 __m256i test_mm256_mask_packus_epi16(__m256i __W, __mmask32 __M, __m256i __A, __m256i __B) {
   // CHECK-LABEL: test_mm256_mask_packus_epi16
@@ -1176,6 +1170,7 @@ __m256i test_mm256_mask_packus_epi16(__m256i __W, __mmask32 __M, __m256i __A, __
   // CHECK: select <32 x i1> %{{.*}}, <32 x i8> %{{.*}}, <32 x i8> %{{.*}}
   return _mm256_mask_packus_epi16(__W,__M,__A,__B);
 }
+TEST_CONSTEXPR(match_v32qi(_mm256_mask_packus_epi16((__m256i)(__v32qi){1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64},(__mmask32)0xAAAAAAAA,(__m256i)(__v16hi){-1,0,1,127,128,255,256,-200,300,42,-42,500,20000,-32768,129,-129},(__m256i)(__v16hi){0,1,-1,255,-129,128,20000,-32768,32767,-32767,127,-128,30000,-30000,90,-90}),1,0,3,127,5,-1,7,0,9,1,11,-1,13,-128,15,0,49,42,51,-1,53,0,55,0,57,0,59,0,61,0,63,0));
 
 __m128i test_mm_mask_adds_epi8(__m128i __W, __mmask16 __U, __m128i __A, __m128i __B) {
   // CHECK-LABEL: test_mm_mask_adds_epi8
@@ -1183,48 +1178,64 @@ __m128i test_mm_mask_adds_epi8(__m128i __W, __mmask16 __U, __m128i __A, __m128i
   // CHECK: select <16 x i1> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}
   return _mm_mask_adds_epi8(__W,__U,__A,__B);
 }
+TEST_CONSTEXPR(match_v16qi(_mm_mask_adds_epi8((__m128i)(__v16qs){1,2,3,4,5,6,7,8,57,58,59,60,61,62,63,64},(__mmask16)0xAAAA,(__m128i)(__v16qs){0,+1,-2,+3,-4,+5,-6,+7,-100,-50,+100,-20,-80,+120,-120,-20},(__m128i)(__v16qs){0,+1,-2,+3,-4,+5,-6,+7,+50,+80,-50,+110,+60,120,+20,-120}),1,+2,3,+6,5,+10,7,+14,57,+30,59,+90,61,+127,63,-128));
+
 __m128i test_mm_maskz_adds_epi8(__mmask16 __U, __m128i __A, __m128i __B) {
   // CHECK-LABEL: test_mm_maskz_adds_epi8
   // CHECK: @llvm.sadd.sat.v16i8
   // CHECK: select <16 x i1> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}
   return _mm_maskz_adds_epi8(__U,__A,__B);
 }
+TEST_CONSTEXPR(match_v16qi(_mm_maskz_adds_epi8((__mmask16)0xAAAA,(__m128i)(__v16qs){0,+1,-2,+3,-4,+5,-6,+7,-100,-50,+100,-20,-80,+120,-120,-20},(__m128i)(__v16qs){0,+1,-2,+3,-4,+5,-6,+7,+50,+80,-50,+110,+60,120,+20,-120}),0,+2,0,+6,0,+10,0,+14,0,+30,0,+90,0,+127,0,-128));
+
 __m256i test_mm256_mask_adds_epi8(__m256i __W, __mmask32 __U, __m256i __A, __m256i __B) {
   // CHECK-LABEL: test_mm256_mask_adds_epi8
   // CHECK: @llvm.sadd.sat.v32i8
   // CHECK: select <32 x i1> %{{.*}}, <32 x i8> %{{.*}}, <32 x i8> %{{.*}}
   return _mm256_mask_adds_epi8(__W,__U,__A,__B);
 }
+TEST_CONSTEXPR(match_v32qi(_mm256_mask_adds_epi8((__m256i)(__v32qs){1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64},(__mmask32)0xAAAAAAAA,(__m256i)(__v32qs){0,+1,-2,+3,-4,+5,-6,+7,-8,+9,-10,+11,-12,+13,-14,+15,+100,+50,-100,+20,+80,-50,+120,-20,-100,-50,+100,-20,-80,+50,-120,+20},(__m256i)(__v32qs){0,+1,-2,+3,-4,+5,-6,+7,-8,+9,-10,+11,-12,+13,-14,+15,+50,+80,-50,+110,+60,-30,+20,-10,+50,+80,-50,+110,+60,-30,+20,-10}),1,+2,3,+6,5,+10,7,+14,9,+18,11,+22,13,+26,15,+30,49,+127,51,+127,53,-80,+55,-30,57,+30,59,+90,61,+20,63,+10));
+
 __m256i test_mm256_maskz_adds_epi8(__mmask32 __U, __m256i __A, __m256i __B) {
   // CHECK-LABEL: test_mm256_maskz_adds_epi8
   // CHECK: @llvm.sadd.sat.v32i8
   // CHECK: select <32 x i1> %{{.*}}, <32 x i8> %{{.*}}, <32 x i8> %{{.*}}
   return _mm256_maskz_adds_epi8(__U,__A,__B);
 }
+TEST_CONSTEXPR(match_v32qi(_mm256_maskz_adds_epi8((__mmask32)0xAAAAAAAA,(__m256i)(__v32qs){0,+1,-2,+3,-4,+5,-6,+7,-8,+9,-10,+11,-12,+13,-14,+15,+100,+50,-100,+20,+80,-50,+120,-20,-100,-50,+100,-20,-80,+50,-120,+20},(__m256i)(__v32qs){0,+1,-2,+3,-4,+5,-6,+7,-8,+9,-10,+11,-12,+13,-14,+15,+50,+80,-50,+110,+60,-30,+20,-10,+50,+80,-50,+110,+60,-30,+20,-10}),0,+2,0,+6,0,+10,0,+14,0,+18,0,+22,0,+26,0,+30,0,+127,0,+127,0,-80,0,-30,0,+30,0,+90,0,+20,0,+10));
+
 __m128i test_mm_mask_adds_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) {
   // CHECK-LABEL: test_mm_mask_adds_epi16
   // CHECK: @llvm.sadd.sat.v8i16
   // CHECK: select <8 x i1> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}
   return _mm_mask_adds_epi16(__W,__U,__A,__B);
 }
+TEST_CONSTEXPR(match_v8hi(_mm_mask_adds_epi16((__m128i)(__v8hi){9,10,11,12,13,14,15,16,},(__mmask8)0xAA,(__m128i)(__v8hi){-24,+25,-26,+27,+32000,-32000,+32000,+32000},(__m128i)(__v8hi){-24,+25,-26,+27,+800,-800,-800,+800}),9,+50,11,+54,13,-32768,15,+32767));
+
 __m128i test_mm_maskz_adds_epi16(__mmask8 __U, __m128i __A, __m128i __B) {
   // CHECK-LABEL: test_mm_maskz_adds_epi16
   // CHECK: @llvm.sadd.sat.v8i16
   // CHECK: select <8 x i1> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}
   return _mm_maskz_adds_epi16(__U,__A,__B);
 }
+TEST_CONSTEXPR(match_v8hi(_mm_maskz_adds_epi16((__mmask8)0xAA,(__m128i)(__v8hi){-24,+25,-26,+27,+32000,-32000,+32000,+32000},(__m128i)(__v8hi){-24,+25,-26,+27,+800,-800,-800,+800}),0,+50,0,+54,0,-32768,0,+32767));
+
 __m256i test_mm256_mask_adds_epi16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B) {
   // CHECK-LABEL: test_mm256_mask_adds_epi16
   // CHECK: @llvm.sadd.sat.v16i16
   // CHECK: select <16 x i1> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}}
   return _mm256_mask_adds_epi16(__W,__U,__A,__B);
 }
+TEST_CONSTEXPR(match_v16hi(_mm256_mask_adds_epi16((__m256i)(__v16hi){1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,},(__mmask16)0xAAAA,(__m256i)(__v16hi){0,+1,-2,+3,-4,+5,-6,+7,-24,+25,-26,+27,+32000,-32000,+32000,+32000},(__m256i)(__v16hi){0,+1,-2,+3,-4,+5,-6,+7,-24,+25,-26,+27,+800,-800,-800,+800}),1,+2,3,+6,5,+10,7,+14,9,+50,11,+54,13,-32768,15,+32767));
+
 __m256i test_mm256_maskz_adds_epi16(__mmask16 __U, __m256i __A, __m256i __B) {
   // CHECK-LABEL: test_mm256_maskz_adds_epi16
   // CHECK: @llvm.sadd.sat.v16i16
   // CHECK: select <16 x i1> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}}
   return _mm256_maskz_adds_epi16(__U,__A,__B);
 }
+TEST_CONSTEXPR(match_v16hi(_mm256_maskz_adds_epi16((__mmask16)0xAAAA,(__m256i)(__v16hi){0,+1,-2,+3,-4,+5,-6,+7,-24,+25,-26,+27,+32000,-32000,+32000,+32000},(__m256i)(__v16hi){0,+1,-2,+3,-4,+5,-6,+7,-24,+25,-26,+27,+800,-800,-800,+800}),0,+2,0,+6,0,+10,0,+14,0,+50,0,+54,0,-32768,0,+32767));
+
 __m128i test_mm_mask_adds_epu8(__m128i __W, __mmask16 __U, __m128i __A, __m128i __B) {
   // CHECK-LABEL: test_mm_mask_adds_epu8
   // CHECK-NOT: @llvm.x86.sse2.paddus.b
@@ -1232,6 +1243,8 @@ __m128i test_mm_mask_adds_epu8(__m128i __W, __mmask16 __U, __m128i __A, __m128i
   // CHECK: select <16 x i1> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}
   return _mm_mask_adds_epu8(__W,__U,__A,__B);
 }
+TEST_CONSTEXPR(match_v16qu(_mm_mask_adds_epu8((__m128i)(__v16qu){1,2,3,4,5,6,7,8,57,58,59,60,61,62,63,64},(__mmask16)0xAAAA,(__m128i)(__v16qu){0,0,0,0,0,0,0,0,+255,+255,+255,+255,+255,+255,+255,+255},(__m128i)(__v16qu){0,+63,+64,+127,+128,+191,+192,+255,0,+63,+64,+127,+128,+191,+192,+255}),1,+63,3,+127,5,+191,7,+255,57,+255,59,+255,61,+255,63,+255));
+
 __m128i test_mm_maskz_adds_epu8(__mmask16 __U, __m128i __A, __m128i __B) {
   // CHECK-LABEL: test_mm_maskz_adds_epu8
   // CHECK-NOT: @llvm.x86.sse2.paddus.b
@@ -1239,6 +1252,8 @@ __m128i test_mm_maskz_adds_epu8(__mmask16 __U, __m128i __A, __m128i __B) {
   // CHECK: select <16 x i1> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}
   return _mm_maskz_adds_epu8(__U,__A,__B);
 }
+TEST_CONSTEXPR(match_v16qu(_mm_maskz_adds_epu8((__mmask16)0xAAAA,(__m128i)(__v16qu){0,0,0,0,0,0,0,0,+255,+255,+255,+255,+255,+255,+255,+255},(__m128i)(__v16qu){0,+63,+64,+127,+128,+191,+192,+255,0,+63,+64,+127,+128,+191,+192,+255}),0,+63,0,+127,0,+191,0,+255,0,+255,0,+255,0,+255,0,+255));
+
 __m256i test_mm256_mask_adds_epu8(__m256i __W, __mmask32 __U, __m256i __A, __m256i __B) {
   // CHECK-LABEL: test_mm256_mask_adds_epu8
   // CHECK-NOT: @llvm.x86.avx2.paddus.b
@@ -1246,6 +1261,8 @@ __m256i test_mm256_mask_adds_epu8(__m256i __W, __mmask32 __U, __m256i __A, __m25
   // CHECK: select <32 x i1> %{{.*}}, <32 x i8> %{{.*}}, <32 x i8> %{{.*}}
   return _mm256_mask_adds_epu8(__W,__U,__A,__B);
 }
+TEST_CONSTEXPR(match_v32qu(_mm256_mask_adds_epu8((__m256i)(__v32qu){1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64},(__mmask32)0xAAAAAAAA,(__m256i)(__v32qu){0,0,0,0,0,0,0,0,+63,+63,+63,+63,+63,+63,+63,+63,+192,+192,+192,+192,+192,+192,+192,+192,+255,+255,+255,+255,+255,+255,+255,+255},(__m256i)(__v32qu){0,+63,+64,+127,+128,+191,+192,+255,0,+63,+64,+127,+128,+191,+192,+255,0,+63,+64,+127,+128,+191,+192,+255,0,+63,+64,+127,+128,+191,+192,+255}),1,+63,3,+127,5,+191,7,+255,9,+126,11,+190,13,+254,15,+255,49,+255,51,+255,53,+255,55,+255,57,+255,59,+255,61,+255,63,+255));
+
 __m256i test_mm256_maskz_adds_epu8(__mmask32 __U, __m256i __A, __m256i __B) {
   // CHECK-LABEL: test_mm256_maskz_adds_epu8
   // CHECK-NOT: @llvm.x86.avx2.paddus.b
@@ -1253,6 +1270,8 @@ __m256i test_mm256_maskz_adds_epu8(__mmask32 __U, __m256i __A, __m256i __B) {
   // CHECK: select <32 x i1> %{{.*}}, <32 x i8> %{{.*}}, <32 x i8> %{{.*}}
   return _mm256_maskz_adds_epu8(__U,__A,__B);
 }
+TEST_CONSTEXPR(match_v32qu(_mm256_maskz_adds_epu8((__mmask32)0xAAAAAAAA,(__m256i)(__v32qu){0,0,0,0,0,0,0,0,+63,+63,+63,+63,+63,+63,+63,+63,+192,+192,+192,+192,+192,+192,+192,+192,+255,+255,+255,+255,+255,+255,+255,+255},(__m256i)(__v32qu){0,+63,+64,+127,+128,+191,+192,+255,0,+63,+64,+127,+128,+191,+192,+255,0,+63,+64,+127,+128,+191,+192,+255,0,+63,+64,+127,+128,+191,+192,+255}),0,+63,0,+127,0,+191,0,+255,0,+126,0,+190,0,+254,0,+255,0,+255,0,+255,0,+255,0,+255,0,+255,0,+255,0,+255,0,+255));
+
 __m128i test_mm_mask_adds_epu16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) {
   // CHECK-LABEL: test_mm_mask_adds_epu16
   // CHECK-NOT: @llvm.x86.sse2.paddus.w
@@ -1260,6 +1279,8 @@ __m128i test_mm_mask_adds_epu16(__m128i __W, __mmask8 __U, __m128i __A, __m128i
   // CHECK: select <8 x i1> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}
   return _mm_mask_adds_epu16(__W,__U,__A,__B);
 }
+TEST_CONSTEXPR(match_v8hu(_mm_mask_adds_epu16((__m128i)(__v8hu){25,26,27,28,29,30,31,32},(__mmask8)0xAA,(__m128i)(__v8hu){+16384,+16384,+16384,+16384,+49152,+49152,+49152,+49152},(__m128i)(__v8hu){0,+16384,+32767,+32768,+32767,+32768,+49152,+65535}),25,+32768,27,+49152,29,+65535,31,+65535));
+
 __m128i test_mm_maskz_adds_epu16(__mmask8 __U, __m128i __A, __m128i __B) {
   // CHECK-LABEL: test_mm_maskz_adds_epu16
   // CHECK-NOT: @llvm.x86.sse2.paddus.w
@@ -1267,6 +1288,8 @@ __m128i test_mm_maskz_adds_epu16(__mmask8 __U, __m128i __A, __m128i __B) {
   // CHECK: select <8 x i1> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}
   return _mm_maskz_adds_epu16(__U,__A,__B);
 }
+TEST_CONSTEXPR(match_v8hu(_mm_maskz_adds_epu16((__mmask8)0xAA,(__m128i)(__v8hu){+16384,+16384,+16384,+16384,+49152,+49152,+49152,+49152},(__m128i)(__v8hu){0,+16384,+32767,+32768,+32767,+32768,+49152,+65535}),0,+32768,0,+49152,0,+65535,0,+65535));
+
 __m256i test_mm256_mask_adds_epu16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B) {
   // CHECK-LABEL: test_mm256_mask_adds_epu16
   // CHECK-NOT: @llvm.x86.avx2.paddus.w
@@ -1274,6 +1297,8 @@ __m256i test_mm256_mask_adds_epu16(__m256i __W, __mmask16 __U, __m256i __A, __m2
   // CHECK: select <16 x i1> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}}
   return _mm256_mask_adds_epu16(__W,__U,__A,__B);
 }
+TEST_CONSTEXPR(match_v16hu(_mm256_mask_adds_epu16((__m256i)(__v16hu){1,2,3,4,5,6,7,8,25,26,27,28,29,30,31,32},(__mmask16)0xAAAA,(__m256i)(__v16hu){0,0,0,0,+16384,+16384,+16384,+16384,+49152,+49152,+49152,+49152,+65535,+65535,+65535,+65535},(__m256i)(__v16hu){0,+32767,+32768,+65535,0,+16384,+32767,+32768,+32767,+32768,+49152,+65535,0,+32767,+32768,+65535}),1,+32767,3,+65535,5,+32768,7,+49152,25,+65535,27,+65535,29,+65535,31,+65535));
+
 __m256i test_mm256_maskz_adds_epu16(__mmask16 __U, __m256i __A, __m256i __B) {
   // CHECK-LABEL: test_mm256_maskz_adds_epu16
   // CHECK-NOT: @llvm.x86.avx2.paddus.w
@@ -1281,6 +1306,8 @@ __m256i test_mm256_maskz_adds_epu16(__mmask16 __U, __m256i __A, __m256i __B) {
   // CHECK: select <16 x i1> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}}
   return _mm256_maskz_adds_epu16(__U,__A,__B);
 }
+TEST_CONSTEXPR(match_v16hu(_mm256_maskz_adds_epu16((__mmask16)0xAAAA,(__m256i)(__v16hu){0,0,0,0,+16384,+16384,+16384,+16384,+49152,+49152,+49152,+49152,+65535,+65535,+65535,+65535},(__m256i)(__v16hu){0,+32767,+32768,+65535,0,+16384,+32767,+32768,+32767,+32768,+49152,+65535,0,+32767,+32768,+65535}),0,+32767,0,+65535,0,+32768,0,+49152,0,+65535,0,+65535,0,+65535,0,+65535));
+
 __m128i test_mm_mask_avg_epu8(__m128i __W, __mmask16 __U, __m128i __A, __m128i __B) {
   // CHECK-LABEL: test_mm_mask_avg_epu8
   // CHECK: @llvm.x86.sse2.pavg.b
@@ -1740,48 +1767,64 @@ __m128i test_mm_mask_subs_epi8(__m128i __W, __mmask16 __U, __m128i __A, __m128i
   // CHECK: select <16 x i1> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}
   return _mm_mask_subs_epi8(__W,__U,__A,__B);
 }
+TEST_CONSTEXPR(match_v16qi(_mm_mask_subs_epi8((__m128i)(__v16qs){1,2,3,4,5,6,7,8,57,58,59,60,61,62,63,64},(__mmask16)0xAAAA,(__m128i)(__v16qs){1,-100,3,4,5,-6,7,100,57,-100,59,60,61,-62,63,100},(__m128i)(__v16qs){1,100,3,4,5,6,7,-100,57,100,59,60,61,62,63,-100}),1,-128,3,0,5,-12,7,127,57,-128,59,0,61,-124,63,127));
+
 __m128i test_mm_maskz_subs_epi8(__mmask16 __U, __m128i __A, __m128i __B) {
   // CHECK-LABEL: test_mm_maskz_subs_epi8
   // CHECK: @llvm.ssub.sat.v16i8
   // CHECK: select <16 x i1> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}
   return _mm_maskz_subs_epi8(__U,__A,__B);
 }
+TEST_CONSTEXPR(match_v16qi(_mm_maskz_subs_epi8((__mmask16)0xAAAA,(__m128i)(__v16qs){1,-100,3,4,5,-6,7,100,57,-100,59,60,61,-62,63,100},(__m128i)(__v16qs){1,100,3,4,5,6,7,-100,57,100,59,60,61,62,63,-100}),0,-128,0,0,0,-12,0,127,0,-128,0,0,0,-124,0,127));
+
 __m256i test_mm256_mask_subs_epi8(__m256i __W, __mmask32 __U, __m256i __A, __m256i __B) {
   // CHECK-LABEL: test_mm256_mask_subs_epi8
   // CHECK: @llvm.ssub.sat.v32i8
   // CHECK: select <32 x i1> %{{.*}}, <32 x i8> %{{.*}}, <32 x i8> %{{.*}}
   return _mm256_mask_subs_epi8(__W,__U,__A,__B);
 }
+TEST_CONSTEXPR(match_v32qi(_mm256_mask_subs_epi8((__m256i)(__v32qs){1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64},(__mmask32)0xAAAAAAAA,(__m256i)(__v32qs){1,-100,3,4,5,-6,7,100,9,-100,11,12,13,-14,15,100,49,-100,51,52,53,-54,55,100,57,-100,59,60,61,-62,63,100},(__m256i)(__v32qs){1,100,3,4,5,6,7,-100,9,100,11,12,13,14,15,-100,49,100,51,52,53,54,55,-100,57,100,59,60,61,62,63,-100}),1,-128,3,0,5,-12,7,127,9,-128,11,0,13,-28,15,127,49,-128,51,0,53,-108,55,127,57,-128,59,0,61,-124,63,127));
+
 __m256i test_mm256_maskz_subs_epi8(__mmask32 __U, __m256i __A, __m256i __B) {
   // CHECK-LABEL: test_mm256_maskz_subs_epi8
   // CHECK: @llvm.ssub.sat.v32i8
   // CHECK: select <32 x i1> %{{.*}}, <32 x i8> %{{.*}}, <32 x i8> %{{.*}}
   return _mm256_maskz_subs_epi8(__U,__A,__B);
 }
+TEST_CONSTEXPR(match_v32qi(_mm256_maskz_subs_epi8((__mmask32)0xAAAAAAAA,(__m256i)(__v32qs){1,-100,3,4,5,-6,7,100,9,-100,11,12,13,-14,15,100,49,-100,51,52,53,-54,55,100,57,-100,59,60,61,-62,63,100},(__m256i)(__v32qs){1,100,3,4,5,6,7,-100,9,100,11,12,13,14,15,-100,49,100,51,52,53,54,55,-100,57,100,59,60,61,62,63,-100}),0,-128,0,0,0,-12,0,127,0,-128,0,0,0,-28,0,127,0,-128,0,0,0,-108,0,127,0,-128,0,0,0,-124,0,127));
+
 __m128i test_mm_mask_subs_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) {
   // CHECK-LABEL: test_mm_mask_subs_epi16
   // CHECK: @llvm.ssub.sat.v8i16
   // CHECK: select <8 x i1> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}
   return _mm_mask_subs_epi16(__W,__U,__A,__B);
 }
+TEST_CONSTEXPR(match_v8hi(_mm_mask_subs_epi16((__m128i)(__v8hi){1,2,3,4,29,30,31,32},(__mmask8)0xAA,(__m128i)(__v8hi){1,-30000,3,30000,29,-30,31,32},(__m128i)(__v8hi){1,30000,3,-30000,29,30,31,-32}),1,-32768,3,32767,29,-60,31,64));
+
 __m128i test_mm_maskz_subs_epi16(__mmask8 __U, __m128i __A, __m128i __B) {
   // CHECK-LABEL: test_mm_maskz_subs_epi16
   // CHECK: @llvm.ssub.sat.v8i16
   // CHECK: select <8 x i1> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}
   return _mm_maskz_subs_epi16(__U,__A,__B);
 }
+TEST_CONSTEXPR(match_v8hi(_mm_maskz_subs_epi16((__mmask8)0xAA,(__m128i)(__v8hi){1,-30000,3,30000,29,-30,31,32},(__m128i)(__v8hi){1,30000,3,-30000,29,30,31,-32}),0,-32768,0,32767,0,-60,0,64));
+
 __m256i test_mm256_mask_subs_epi16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B) {
   // CHECK-LABEL: test_mm256_mask_subs_epi16
   // CHECK: @llvm.ssub.sat.v16i16
   // CHECK: select <16 x i1> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}}
   return _mm256_mask_subs_epi16(__W,__U,__A,__B);
 }
+TEST_CONSTEXPR(match_v16hi(_mm256_mask_subs_epi16((__m256i)(__v16hi){1,2,3,4,5,6,7,8,25,26,27,28,29,30,31,32},(__mmask16)0xAAAA,(__m256i)(__v16hi){1,-30000,3,30000,5,-6,7,8,25,-30000,27,30000,29,-30,31,32},(__m256i)(__v16hi){1,30000,3,-30000,5,6,7,-8,25,30000,27,-30000,29,30,31,-32}),1,-32768,3,32767,5,-12,7,16,25,-32768,27,32767,29,-60,31,64));
+
 __m256i test_mm256_maskz_subs_epi16(__mmask16 __U, __m256i __A, __m256i __B) {
   // CHECK-LABEL: test_mm256_maskz_subs_epi16
   // CHECK: @llvm.ssub.sat.v16i16
   // CHECK: select <16 x i1> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}}
   return _mm256_maskz_subs_epi16(__U,__A,__B);
 }
+TEST_CONSTEXPR(match_v16hi(_mm256_maskz_subs_epi16((__mmask16)0xAAAA,(__m256i)(__v16hi){1,-30000,3,30000,5,-6,7,8,25,-30000,27,30000,29,-30,31,32},(__m256i)(__v16hi){1,30000,3,-30000,5,6,7,-8,25,30000,27,-30000,29,30,31,-32}),0,-32768,0,32767,0,-12,0,16,0,-32768,0,32767,0,-60,0,64));
+
 __m128i test_mm_mask_subs_epu8(__m128i __W, __mmask16 __U, __m128i __A, __m128i __B) {
   // CHECK-LABEL: test_mm_mask_subs_epu8
   // CHECK-NOT: @llvm.x86.sse2.psubus.b
@@ -1789,6 +1832,8 @@ __m128i test_mm_mask_subs_epu8(__m128i __W, __mmask16 __U, __m128i __A, __m128i
   // CHECK: select <16 x i1> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}
   return _mm_mask_subs_epu8(__W,__U,__A,__B);
 }
+TEST_CONSTEXPR(match_v16qu(_mm_mask_subs_epu8((__m128i)(__v16qu){1,2,3,4,5,6,7,8,57,58,59,60,61,62,63,64},(__mmask16)0xAAAA,(__m128i)(__v16qu){0,250,0,128,0,20,0,255,0,0,0,1,0,100,0,255},(__m128i)(__v16qu){0,50,0,128,0,30,0,1,0,1,0,0,0,99,0,255}),1,200,3,0,5,0,7,254,57,0,59,1,61,1,63,0));
+
 __m128i test_mm_maskz_subs_epu8(__mmask16 __U, __m128i __A, __m128i __B) {
   // CHECK-LABEL: test_mm_maskz_subs_epu8
   // CHECK-NOT: @llvm.x86.sse2.psubus.b
@@ -1796,6 +1841,8 @@ __m128i test_mm_maskz_subs_epu8(__mmask16 __U, __m128i __A, __m128i __B) {
   // CHECK: select <16 x i1> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}
   return _mm_maskz_subs_epu8(__U,__A,__B);
 }
+TEST_CONSTEXPR(match_v16qu(_mm_maskz_subs_epu8((__mmask16)0xAAAA,(__m128i)(__v16qu){0,250,0,128,0,20,0,255,0,0,0,1,0,100,0,255},(__m128i)(__v16qu){0,50,0,128,0,30,0,1,0,1,0,0,0,99,0,255}),0,200,0,0,0,0,0,254,0,0,0,1,0,1,0,0));
+
 __m256i test_mm256_mask_subs_epu8(__m256i __W, __mmask32 __U, __m256i __A, __m256i __B) {
   // CHECK-LABEL: test_mm256_mask_subs_epu8
   // CHECK-NOT: @llvm.x86.avx2.psubus.b
@@ -1803,6 +1850,8 @@ __m256i test_mm256_mask_subs_epu8(__m256i __W, __mmask32 __U, __m256i __A, __m25
   // CHECK: select <32 x i1> %{{.*}}, <32 x i8> %{{.*}}, <32 x i8> %{{.*}}
   return _mm256_mask_subs_epu8(__W,__U,__A,__B);
 }
+TEST_CONSTEXPR(match_v32qu(_mm256_mask_subs_epu8((__m256i)(__v32qu){1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64},(__mmask32)0xAAAAAAAA,(__m256i)(__v32qu){0,250,0,128,0,20,0,255,0,0,0,1,0,100,0,255,0,250,0,128,0,20,0,255,0,0,0,1,0,100,0,255},(__m256i)(__v32qu){0,50,0,128,0,30,0,1,0,1,0,0,0,99,0,255,0,50,0,128,0,30,0,1,0,1,0,0,0,99,0,255}),1,200,3,0,5,0,7,254,9,0,11,1,13,1,15,0,49,200,51,0,53,0,55,254,57,0,59,1,61,1,63,0));
+
 __m256i test_mm256_maskz_subs_epu8(__mmask32 __U, __m256i __A, __m256i __B) {
   // CHECK-LABEL: test_mm256_maskz_subs_epu8
   // CHECK-NOT: @llvm.x86.avx2.psubus.b
@@ -1810,6 +1859,8 @@ __m256i test_mm256_maskz_subs_epu8(__mmask32 __U, __m256i __A, __m256i __B) {
   // CHECK: select <32 x i1> %{{.*}}, <32 x i8> %{{.*}}, <32 x i8> %{{.*}}
   return _mm256_maskz_subs_epu8(__U,__A,__B);
 }
+TEST_CONSTEXPR(match_v32qu(_mm256_maskz_subs_epu8((__mmask32)0xAAAAAAAA,(__m256i)(__v32qu){0,250,0,128,0,20,0,255,0,0,0,1,0,100,0,255,0,250,0,128,0,20,0,255,0,0,0,1,0,100,0,255},(__m256i)(__v32qu){0,50,0,128,0,30,0,1,0,1,0,0,0,99,0,255,0,50,0,128,0,30,0,1,0,1,0,0,0,99,0,255}),0,200,0,0,0,0,0,254,0,0,0,1,0,1,0,0,0,200,0,0,0,0,0,254,0,0,0,1,0,1,0,0));
+
 __m128i test_mm_mask_subs_epu16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) {
   // CHECK-LABEL: test_mm_mask_subs_epu16
   // CHECK-NOT: @llvm.x86.sse2.psubus.w
@@ -1817,6 +1868,8 @@ __m128i test_mm_mask_subs_epu16(__m128i __W, __mmask8 __U, __m128i __A, __m128i
   // CHECK: select <8 x i1> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}
   return _mm_mask_subs_epu16(__W,__U,__A,__B);
 }
+TEST_CONSTEXPR(match_v8hu(_mm_mask_subs_epu16((__m128i)(__v8hu){101,102,103,104,129,130,131,132},(__mmask8)0xAAu,(__m128i)(__v8hu){0,65000,0,40000,0,1,0,50000},(__m128i)(__v8hu){0,5000,0,60000,0,0,0,25000}),101,60000,103,0,129,1,131,25000));
+
 __m128i test_mm_maskz_subs_epu16(__mmask8 __U, __m128i __A, __m128i __B) {
   // CHECK-LABEL: test_mm_maskz_subs_epu16
   // CHECK-NOT: @llvm.x86.sse2.psubus.w
@@ -1824,6 +1877,8 @@ __m128i test_mm_maskz_subs_epu16(__mmask8 __U, __m128i __A, __m128i __B) {
   // CHECK: select <8 x i1> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}
   return _mm_maskz_subs_epu16(__U,__A,__B);
 }
+TEST_CONSTEXPR(match_v8hu(_mm_maskz_subs_epu16((__mmask8)0xAAu,(__m128i)(__v8hu){0,65000,0,40000,0,1,0,50000},(__m128i)(__v8hu){0,5000,0,60000,0,0,0,25000}),0,60000,0,0,0,1,0,25000));
+
 __m256i test_mm256_mask_subs_epu16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B) {
   // CHECK-LABEL: test_mm256_mask_subs_epu16
   // CHECK-NOT: @llvm.x86.avx2.psubus.w
@@ -1831,6 +1886,8 @@ __m256i test_mm256_mask_subs_epu16(__m256i __W, __mmask16 __U, __m256i __A, __m2
   // CHECK: select <16 x i1> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}}
   return _mm256_mask_subs_epu16(__W,__U,__A,__B);
 }
+TEST_CONSTEXPR(match_v16hu(_mm256_mask_subs_epu16((__m256i)(__v16hu){101,102,103,104,105,106,107,108,125,126,127,128,129,130,131,132},(__mmask16)0xAAAAu,(__m256i)(__v16hu){0,65000,0,40000,0,100,0,65535,0,0,0,1000,0,1,0,50000},(__m256i)(__v16hu){0,5000,0,40000,0,200,0,1,0,1,0,65535,0,0,0,25000}),101,60000,103,0,105,0,107,65534,125,0,127,0,129,1,131,25000));
+
 __m256i test_mm256_maskz_subs_epu16(__mmask16 __U, __m256i __A, __m256i __B) {
   // CHECK-LABEL: test_mm256_maskz_subs_epu16
   // CHECK-NOT: @llvm.x86.avx2.psubus.w
@@ -1838,7 +1895,7 @@ __m256i test_mm256_maskz_subs_epu16(__mmask16 __U, __m256i __A, __m256i __B) {
   // CHECK: select <16 x i1> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}}
   return _mm256_maskz_subs_epu16(__U,__A,__B);
 }
-
+TEST_CONSTEXPR(match_v16hu(_mm256_maskz_subs_epu16((__mmask16)0xAAAAu,(__m256i)(__v16hu){0,65000,0,40000,0,100,10,65535,0,0,0,1000,0,1,10000,50000},(__m256i)(__v16hu){0,5000,0,40000,0,200,0,1,0,1,0,65535,0,0,0,25000}),0,60000,0,0,0,0,0,65534,0,0,0,0,0,1,0,25000));
 
 __m128i test_mm_mask2_permutex2var_epi16(__m128i __A, __m128i __I, __mmask8 __U, __m128i __B) {
   // CHECK-LABEL: test_mm_mask2_permutex2var_epi16
@@ -2233,6 +2290,7 @@ __m128i test_mm_mask_unpackhi_epi8(__m128i __W, __mmask16 __U, __m128i __A, __m1
   // CHECK: select <16 x i1> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}
   return _mm_mask_unpackhi_epi8(__W, __U, __A, __B);
 }
+TEST_CONSTEXPR(match_v16qi(_mm_mask_unpackhi_epi8((__m128i)(__v16qs){1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16},(__mmask16)0xFAAA,(__m128i)(__v16qs){100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115},(__m128i)(__v16qs){-1,-2,-3,-4,-5,-6,-7,-8,-9,-10,-11,-12,-13,-14,-15,-16}),1,-9,3,-10,5,-11,7,-12,9,-13,11,-14,114,-15,115,-16));
 
 __m128i test_mm_maskz_unpackhi_epi8(__mmask16 __U, __m128i __A, __m128i __B) {
   // CHECK-LABEL: test_mm_maskz_unpackhi_epi8
@@ -2240,6 +2298,7 @@ __m128i test_mm_maskz_unpackhi_epi8(__mmask16 __U, __m128i __A, __m128i __B) {
   // CHECK: select <16 x i1> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}
   return _mm_maskz_unpackhi_epi8(__U, __A, __B);
 }
+TEST_CONSTEXPR(match_v16qi(_mm_maskz_unpackhi_epi8((__mmask16)0xFAAA,(__m128i)(__v16qs){100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115},(__m128i)(__v16qs){-1,-2,-3,-4,-5,-6,-7,-8,-9,-10,-11,-12,-13,-14,-15,-16}),0,-9,0,-10,0,-11,0,-12,0,-13,0,-14,114,-15,115,-16));
 
 __m256i test_mm256_mask_unpackhi_epi8(__m256i __W, __mmask32 __U, __m256i __A, __m256i __B) {
   // CHECK-LABEL: test_mm256_mask_unpackhi_epi8
@@ -2247,6 +2306,7 @@ __m256i test_mm256_mask_unpackhi_epi8(__m256i __W, __mmask32 __U, __m256i __A, _
   // CHECK: select <32 x i1> %{{.*}}, <32 x i8> %{{.*}}, <32 x i8> %{{.*}}
   return _mm256_mask_unpackhi_epi8(__W, __U, __A, __B);
 }
+TEST_CONSTEXPR(match_v32qi(_mm256_mask_unpackhi_epi8((__m256i)(__v32qs){1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64},(__mmask32)0xFAAAAAAA,(__m256i)(__v32qs){100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,-104,-103,-102,-101,-100,-99,-98,-97,-96,-95,-94,-93,-92,-91,-90,-89},(__m256i)(__v32qs){-1,-2,-3,-4,-5,-6,-7,-8,-9,-10,-11,-12,-13,-14,-15,-16,-49,-50,-51,-52,-53,-54,-55,-56,-57,-58,-59,-60,-61,-62,-63,-64}),1,-9,3,-10,5,-11,7,-12,9,-13,11,-14,13,-15,15,-16,49,-57,51,-58,53,-59,55,-60,57,-61,59,-62,-90,-63,-89,-64));
 
 __m256i test_mm256_maskz_unpackhi_epi8(__mmask32 __U, __m256i __A, __m256i __B) {
   // CHECK-LABEL: test_mm256_maskz_unpackhi_epi8
@@ -2254,6 +2314,7 @@ __m256i test_mm256_maskz_unpackhi_epi8(__mmask32 __U, __m256i __A, __m256i __B)
   // CHECK: select <32 x i1> %{{.*}}, <32 x i8> %{{.*}}, <32 x i8> %{{.*}}
   return _mm256_maskz_unpackhi_epi8(__U, __A, __B);
 }
+TEST_CONSTEXPR(match_v32qi(_mm256_maskz_unpackhi_epi8((__mmask32)0xFAAAAAAA,(__m256i)(__v32qs){100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,-104,-103,-102,-101,-100,-99,-98,-97,-96,-95,-94,-93,-92,-91,-90,-89},(__m256i)(__v32qs){-1,-2,-3,-4,-5,-6,-7,-8,-9,-10,-11,-12,-13,-14,-15,-16,-49,-50,-51,-52,-53,-54,-55,-56,-57,-58,-59,-60,-61,-62,-63,-64}),0,-9,0,-10,0,-11,0,-12,0,-13,0,-14,0,-15,0,-16,0,-57,0,-58,0,-59,0,-60,0,-61,0,-62,-90,-63,-89,-64));
 
 __m128i test_mm_mask_unpackhi_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) {
   // CHECK-LABEL: test_mm_mask_unpackhi_epi16
@@ -2261,6 +2322,7 @@ __m128i test_mm_mask_unpackhi_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m1
   // CHECK: select <8 x i1> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}
   return _mm_mask_unpackhi_epi16(__W, __U, __A, __B);
 }
+TEST_CONSTEXPR(match_v8hi(_mm_mask_unpackhi_epi16((__m128i)(__v8hi){1,2,3,4,5,6,7,8},(__mmask8)0xFA,(__m128i)(__v8hi){100,101,102,103,104,105,106,107},(__m128i)(__v8hi){200,201,202,203,204,205,206,207}),1,204,3,205,106,206,107,207));
 
 __m128i test_mm_maskz_unpackhi_epi16(__mmask8 __U, __m128i __A, __m128i __B) {
   // CHECK-LABEL: test_mm_maskz_unpackhi_epi16
@@ -2268,6 +2330,7 @@ __m128i test_mm_maskz_unpackhi_epi16(__mmask8 __U, __m128i __A, __m128i __B) {
   // CHECK: select <8 x i1> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}
   return _mm_maskz_unpackhi_epi16(__U, __A, __B);
 }
+TEST_CONSTEXPR(match_v8hi(_mm_maskz_unpackhi_epi16((__mmask8)0xFA,(__m128i)(__v8hi){100,101,102,103,104,105,106,107},(__m128i)(__v8hi){200,201,202,203,204,205,206,207}),0,204,0,205,106,206,107,207));
 
 __m256i test_mm256_mask_unpackhi_epi16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B) {
   // CHECK-LABEL: test_mm256_mask_unpackhi_epi16
@@ -2275,6 +2338,7 @@ __m256i test_mm256_mask_unpackhi_epi16(__m256i __W, __mmask16 __U, __m256i __A,
   // CHECK: select <16 x i1> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}}
   return _mm256_mask_unpackhi_epi16(__W, __U, __A, __B);
 }
+TEST_CONSTEXPR(match_v16hi(_mm256_mask_unpackhi_epi16((__m256i)(__v16hi){1,2,3,4,5,6,7,8,25,26,27,28,29,30,31,32},(__mmask16)0xFAAAu,(__m256i)(__v16hi){100,101,102,103,104,105,106,107,130,131,132,133,134,135,136,137},(__m256i)(__v16hi){200,201,202,203,204,205,206,207,230,231,232,233,234,235,236,237}),1,204,3,205,5,206,7,207,25,234,27,235,136,236,137,237));
 
 __m256i test_mm256_maskz_unpackhi_epi16(__mmask16 __U, __m256i __A, __m256i __B) {
   // CHECK-LABEL: test_mm256_maskz_unpackhi_epi16
@@ -2282,6 +2346,7 @@ __m256i test_mm256_maskz_unpackhi_epi16(__mmask16 __U, __m256i __A, __m256i __B)
   // CHECK: select <16 x i1> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}}
   return _mm256_maskz_unpackhi_epi16(__U, __A, __B);
 }
+TEST_CONSTEXPR(match_v16hi(_mm256_maskz_unpackhi_epi16((__mmask16)0xFAAAu,(__m256i)(__v16hi){100,101,102,103,104,105,106,107,130,131,132,133,134,135,136,137},(__m256i)(__v16hi){200,201,202,203,204,205,206,207,230,231,232,233,234,235,236,237}),0,204,0,205,0,206,0,207,0,234,0,235,136,236,137,237));
 
 __m128i test_mm_mask_unpacklo_epi8(__m128i __W, __mmask16 __U, __m128i __A, __m128i __B) {
   // CHECK-LABEL: test_mm_mask_unpacklo_epi8
@@ -2289,6 +2354,7 @@ __m128i test_mm_mask_unpacklo_epi8(__m128i __W, __mmask16 __U, __m128i __A, __m1
   // CHECK: select <16 x i1> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}
   return _mm_mask_unpacklo_epi8(__W, __U, __A, __B);
 }
+TEST_CONSTEXPR(match_v16qi(_mm_mask_unpacklo_epi8((__m128i)(__v16qs){1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16},(__mmask16)0xFAAA,(__m128i)(__v16qs){100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115},(__m128i)(__v16qs){-1,-2,-3,-4,-5,-6,-7,-8,-9,-10,-11,-12,-13,-14,-15,-16}),1,-1,3,-2,5,-3,7,-4,9,-5,11,-6,106,-7,107,-8));
 
 __m128i test_mm_maskz_unpacklo_epi8(__mmask16 __U, __m128i __A, __m128i __B) {
   // CHECK-LABEL: test_mm_maskz_unpacklo_epi8
@@ -2296,6 +2362,7 @@ __m128i test_mm_maskz_unpacklo_epi8(__mmask16 __U, __m128i __A, __m128i __B) {
   // CHECK: select <16 x i1> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}
   return _mm_maskz_unpacklo_epi8(__U, __A, __B);
 }
+TEST_CONSTEXPR(match_v16qi(_mm_maskz_unpacklo_epi8((__mmask16)0xFAAA,(__m128i)(__v16qs){100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115},(__m128i)(__v16qs){-1,-2,-3,-4,-5,-6,-7,-8,-9,-10,-11,-12,-13,-14,-15,-16}),0,-1,0,-2,0,-3,0,-4,0,-5,0,-6,106,-7,107,-8));
 
 __m256i test_mm256_mask_unpacklo_epi8(__m256i __W, __mmask32 __U, __m256i __A, __m256i __B) {
   // CHECK-LABEL: test_mm256_mask_unpacklo_epi8
@@ -2303,6 +2370,7 @@ __m256i test_mm256_mask_unpacklo_epi8(__m256i __W, __mmask32 __U, __m256i __A, _
   // CHECK: select <32 x i1> %{{.*}}, <32 x i8> %{{.*}}, <32 x i8> %{{.*}}
   return _mm256_mask_unpacklo_epi8(__W, __U, __A, __B);
 }
+TEST_CONSTEXPR(match_v32qi(_mm256_mask_unpacklo_epi8((__m256i)(__v32qs){1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64},(__mmask32)0xFAAAAAAA,(__m256i)(__v32qs){100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,-50,-51,-52,-53,-54,-55,-56,-57,-58,-59,-60,-61,-62,-63,-64,-65},(__m256i)(__v32qs){-1,-2,-3,-4,-5,-6,-7,-8,-9,-10,-11,-12,-13,-14,-15,-16,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75}),1,-1,3,-2,5,-3,7,-4,9,-5,11,-6,13,-7,15,-8,49,60,51,61,53,62,55,63,57,64,59,65,-56,66,-57,67));
 
 __m256i test_mm256_maskz_unpacklo_epi8(__mmask32 __U, __m256i __A, __m256i __B) {
   // CHECK-LABEL: test_mm256_maskz_unpacklo_epi8
@@ -2310,6 +2378,7 @@ __m256i test_mm256_maskz_unpacklo_epi8(__mmask32 __U, __m256i __A, __m256i __B)
   // CHECK: select <32 x i1> %{{.*}}, <32 x i8> %{{.*}}, <32 x i8> %{{.*}}
   return _mm256_maskz_unpacklo_epi8(__U, __A, __B);
 }
+TEST_CONSTEXPR(match_v32qi(_mm256_maskz_unpacklo_epi8((__mmask32)0xFAAAAAAA,(__m256i)(__v32qs){100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,-50,-51,-52,-53,-54,-55,-56,-57,-58,-59,-60,-61,-62,-63,-64,-65},(__m256i)(__v32qs){-1,-2,-3,-4,-5,-6,-7,-8,-9,-10,-11,-12,-13,-14,-15,-16,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75}),0,-1,0,-2,0,-3,0,-4,0,-5,0,-6,0,-7,0,-8,0,60,0,61,0,62,0,63,0,64,0,65,-56,66,-57,67));
 
 __m128i test_mm_mask_unpacklo_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) {
   // CHECK-LABEL: test_mm_mask_unpacklo_epi16
@@ -2317,6 +2386,7 @@ __m128i test_mm_mask_unpacklo_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m1
   // CHECK: select <8 x i1> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}
   return _mm_mask_unpacklo_epi16(__W, __U, __A, __B);
 }
+TEST_CONSTEXPR(match_v8hi(_mm_mask_unpacklo_epi16((__m128i)(__v8hi){1,2,3,4,5,6,7,8},(__mmask8)0xFAu,(__m128i)(__v8hi){100,101,102,103,104,105,106,107},(__m128i)(__v8hi){200,201,202,203,204,205,206,207}),1,200,3,201,102,202,103,203));
 
 __m128i test_mm_maskz_unpacklo_epi16(__mmask8 __U, __m128i __A, __m128i __B) {
   // CHECK-LABEL: test_mm_maskz_unpacklo_epi16
@@ -2324,6 +2394,7 @@ __m128i test_mm_maskz_unpacklo_epi16(__mmask8 __U, __m128i __A, __m128i __B) {
   // CHECK: select <8 x i1> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}
   return _mm_maskz_unpacklo_epi16(__U, __A, __B);
 }
+TEST_CONSTEXPR(match_v8hi(_mm_maskz_unpacklo_epi16((__mmask8)0xFAu,(__m128i)(__v8hi){100,101,102,103,104,105,106,107},(__m128i)(__v8hi){200,201,202,203,204,205,206,207}),0,200,0,201,102,202,103,203));
 
 __m256i test_mm256_mask_unpacklo_epi16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B) {
   // CHECK-LABEL: test_mm256_mask_unpacklo_epi16
@@ -2331,6 +2402,7 @@ __m256i test_mm256_mask_unpacklo_epi16(__m256i __W, __mmask16 __U, __m256i __A,
   // CHECK: select <16 x i1> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}}
   return _mm256_mask_unpacklo_epi16(__W, __U, __A, __B);
 }
+TEST_CONSTEXPR(match_v16hi(_mm256_mask_unpacklo_epi16((__m256i)(__v16hi){1,2,3,4,5,6,7,8,25,26,27,28,29,30,31,32},(__mmask16)0xFAAAu,(__m256i)(__v16hi){100,101,102,103,104,105,106,107,130,131,132,133,134,135,136,137},(__m256i)(__v16hi){200,201,202,203,204,205,206,207,230,231,232,233,234,235,236,237}),1,200,3,201,5,202,7,203,25,230,27,231,132,232,133,233));
 
 __m256i test_mm256_maskz_unpacklo_epi16(__mmask16 __U, __m256i __A, __m256i __B) {
   // CHECK-LABEL: test_mm256_maskz_unpacklo_epi16
@@ -2338,6 +2410,7 @@ __m256i test_mm256_maskz_unpacklo_epi16(__mmask16 __U, __m256i __A, __m256i __B)
   // CHECK: select <16 x i1> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}}
   return _mm256_maskz_unpacklo_epi16(__U, __A, __B);
 }
+TEST_CONSTEXPR(match_v16hi(_mm256_maskz_unpacklo_epi16((__mmask16)0xFAAAu,(__m256i)(__v16hi){100,101,102,103,104,105,106,107,130,131,132,133,134,135,136,137},(__m256i)(__v16hi){200,201,202,203,204,205,206,207,230,231,232,233,234,235,236,237}),0,200,0,201,0,202,0,203,0,230,0,231,132,232,133,233));
 
 __m128i test_mm_mask_cvtepi8_epi16(__m128i __W, __mmask8 __U, __m128i __A) {
   // CHECK-LABEL: test_mm_mask_cvtepi8_epi16
@@ -2345,6 +2418,7 @@ __m128i test_mm_mask_cvtepi8_epi16(__m128i __W, __mmask8 __U, __m128i __A) {
   // CHECK: select <8 x i1> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}
   return _mm_mask_cvtepi8_epi16(__W, __U, __A);
 }
+TEST_CONSTEXPR(match_v8hi(_mm_mask_cvtepi8_epi16(_mm_set1_epi16(-777),(__mmask8)0xA5,(__m128i)(__v16qs){1,-2,3,-4,5,-6,7,-8,9,10,11,12,13,14,15,16}),1,-777,3,-777,-777,-6,-777,-8));
 
 __m128i test_mm_maskz_cvtepi8_epi16(__mmask8 __U, __m128i __A) {
   // CHECK-LABEL: test_mm_maskz_cvtepi8_epi16
@@ -2352,6 +2426,7 @@ __m128i test_mm_maskz_cvtepi8_epi16(__mmask8 __U, __m128i __A) {
   // CHECK: select <8 x i1> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}
   return _mm_maskz_cvtepi8_epi16(__U, __A);
 }
+TEST_CONSTEXPR(match_v8hi(_mm_maskz_cvtepi8_epi16((__mmask8)0xA5,(__m128i)(__v16qs){1,-2,3,-4,5,-6,7,-8,9,10,11,12,13,14,15,16}),1,0,3,0,0,-6,0,-8));
 
 __m256i test_mm256_mask_cvtepi8_epi16(__m256i __W, __mmask16 __U, __m128i __A) {
   // CHECK-LABEL: test_mm256_mask_cvtepi8_epi16
@@ -2359,6 +2434,7 @@ __m256i test_mm256_mask_cvtepi8_epi16(__m256i __W, __mmask16 __U, __m128i __A) {
   // CHECK: select <16 x i1> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}}
   return _mm256_mask_cvtepi8_epi16(__W, __U, __A);
 }
+TEST_CONSTEXPR(match_v16hi(_mm256_mask_cvtepi8_epi16(_mm256_set1_epi16(-777),/*1001110010100101=*/0x9ca5,(__m128i)(__v16qs){1,-2,3,-4,5,-6,7,-8,25,-26,27,-28,29,-30,31,-32}),1,-777,3,-777,-777,-6,-777,-8,-777,-777,27,-28,29,-777,-777,-32));
 
 __m256i test_mm256_maskz_cvtepi8_epi16(__mmask16 __U, __m128i __A) {
   // CHECK-LABEL: test_mm256_maskz_cvtepi8_epi16
@@ -2366,6 +2442,7 @@ __m256i test_mm256_maskz_cvtepi8_epi16(__mmask16 __U, __m128i __A) {
   // CHECK: select <16 x i1> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}}
   return _mm256_maskz_cvtepi8_epi16(__U, __A);
 }
+TEST_CONSTEXPR(match_v16hi(_mm256_maskz_cvtepi8_epi16(/*1001110010100101=*/0x9ca5,(__m128i)(__v16qs){1,-2,3,-4,5,-6,7,-8,25,-26,27,-28,29,-30,31,-32}),1,0,3,0,0,-6,0,-8,0,0,27,-28,29,0,0,-32));
 
 __m128i test_mm_mask_cvtepu8_epi16(__m128i __W, __mmask8 __U, __m128i __A) {
   // CHECK-LABEL: test_mm_mask_cvtepu8_epi16
@@ -2373,6 +2450,7 @@ __m128i test_mm_mask_cvtepu8_epi16(__m128i __W, __mmask8 __U, __m128i __A) {
   // CHECK: select <8 x i1> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}
   return _mm_mask_cvtepu8_epi16(__W, __U, __A);
 }
+TEST_CONSTEXPR(match_v8hi(_mm_mask_cvtepu8_epi16(_mm_set1_epi16(-777),(__mmask8)0xA5,(__m128i)(__v16qu){25,26,27,28,29,30,31,32,0,0,0,0,0,0,0,0}),25,-777,27,-777,-777,30,-777,32));
 
 __m128i test_mm_maskz_cvtepu8_epi16(__mmask8 __U, __m128i __A) {
   // CHECK-LABEL: test_mm_maskz_cvtepu8_epi16
@@ -2380,6 +2458,7 @@ __m128i test_mm_maskz_cvtepu8_epi16(__mmask8 __U, __m128i __A) {
   // CHECK: select <8 x i1> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}
   return _mm_maskz_cvtepu8_epi16(__U, __A);
 }
+TEST_CONSTEXPR(match_v8hi(_mm_maskz_cvtepu8_epi16((__mmask8)0xA5,(__m128i)(__v16qu){25,26,27,28,29,30,31,32,0,0,0,0,0,0,0,0}),25,0,27,0,0,30,0,32));
 
 __m256i test_mm256_mask_cvtepu8_epi16(__m256i __W, __mmask16 __U, __m128i __A) {
   // CHECK-LABEL: test_mm256_mask_cvtepu8_epi16
@@ -2387,6 +2466,7 @@ __m256i test_mm256_mask_cvtepu8_epi16(__m256i __W, __mmask16 __U, __m128i __A) {
   // CHECK: select <16 x i1> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}}
   return _mm256_mask_cvtepu8_epi16(__W, __U, __A);
 }
+TEST_CONSTEXPR(match_v16hi(_mm256_mask_cvtepu8_epi16(_mm256_set1_epi16(-777),/*1001110010100101=*/0x9ca5,(__m128i)(__v16qu){1,2,3,4,5,6,7,8,25,26,27,28,29,30,31,32}),1,-777,3,-777,-777,6,-777,8,-777,-777,27,28,29,-777,-777,32));
 
 __m256i test_mm256_maskz_cvtepu8_epi16(__mmask16 __U, __m128i __A) {
   // CHECK-LABEL: test_mm256_maskz_cvtepu8_epi16
@@ -2394,6 +2474,7 @@ __m256i test_mm256_maskz_cvtepu8_epi16(__mmask16 __U, __m128i __A) {
   // CHECK: select <16 x i1> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}}
   return _mm256_maskz_cvtepu8_epi16(__U, __A);
 }
+TEST_CONSTEXPR(match_v16hi(_mm256_maskz_cvtepu8_epi16(/*1001110010100101=*/0x9ca5,(__m128i)(__v16qu){1,2,3,4,5,6,7,8,25,26,27,28,29,30,31,32}),1,0,3,0,0,6,0,8,0,0,27,28,29,0,0,32));
 
 __m256i test_mm256_sllv_epi16(__m256i __A, __m256i __B) {
   // CHECK-LABEL: test_mm256_sllv_epi16
@@ -2468,6 +2549,7 @@ __m256i test_mm256_maskz_sll_epi16(__mmask16 __U, __m256i __A, __m128i __B) {
   // CHECK: select <16 x i1> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}}
   return _mm256_maskz_sll_epi16(__U, __A, __B);
 }
+TEST_CONSTEXPR(match_v8hi(_mm_maskz_slli_epi16((__mmask8)0xAA, (__m128i)(__v8hi){0, 1, 2, 3, 4, 5, 6, 7}, 20), 0, 0, 0, 0, 0, 0, 0, 0));
 
 __m128i test_mm_mask_slli_epi16(__m128i __W, __mmask8 __U, __m128i __A) {
   // CHECK-LABEL: test_mm_mask_slli_epi16
@@ -2475,6 +2557,7 @@ __m128i test_mm_mask_slli_epi16(__m128i __W, __mmask8 __U, __m128i __A) {
   // CHECK: select <8 x i1> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}
   return _mm_mask_slli_epi16(__W, __U, __A, 5);
 }
+TEST_CONSTEXPR(match_v8hi(_mm_mask_slli_epi16((__m128i)(__v8hi){100, 101, 102, 103, 104, 105, 106, 107}, (__mmask8)0xAA, (__m128i)(__v8hi){0, 1, 2, 3, 4, 5, 6, 7}, 20), 100, 0, 102, 0, 104, 0, 106, 0));
 
 __m128i test_mm_mask_slli_epi16_2(__m128i __W, __mmask8 __U, __m128i __A, unsigned int __B) {
   // CHECK-LABEL: test_mm_mask_slli_epi16_2
@@ -3152,6 +3235,7 @@ __m128i test_mm_mask_broadcastb_epi8(__m128i __O, __mmask16 __M, __m128i __A) {
   // CHECK: select <16 x i1> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}
   return _mm_mask_broadcastb_epi8(__O, __M, __A);
 }
+TEST_CONSTEXPR(match_v16qi(_mm_mask_broadcastb_epi8((__m128i)(__v16qs){0,1,2,3,4,5,6,7,56,57,58,59,60,61,62,63},(__mmask16)0xAAAA,(__m128i)(__v16qs){-120,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15}),0,-120,2,-120,4,-120,6,-120,56,-120,58,-120,60,-120,62,-120));
 
 __m128i test_mm_maskz_broadcastb_epi8(__mmask16 __M, __m128i __A) {
   // CHECK-LABEL: test_mm_maskz_broadcastb_epi8
@@ -3159,6 +3243,7 @@ __m128i test_mm_maskz_broadcastb_epi8(__mmask16 __M, __m128i __A) {
   // CHECK: select <16 x i1> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}
   return _mm_maskz_broadcastb_epi8(__M, __A);
 }
+TEST_CONSTEXPR(match_v16qi(_mm_maskz_broadcastb_epi8((__mmask16)0xAAAA,(__m128i)(__v16qs){-120,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15}),0,-120,0,-120,0,-120,0,-120,0,-120,0,-120,0,-120,0,-120));
 
 __m256i test_mm256_mask_broadcastb_epi8(__m256i __O, __mmask32 __M, __m128i __A) {
   // CHECK-LABEL: test_mm256_mask_broadcastb_epi8
@@ -3166,6 +3251,7 @@ __m256i test_mm256_mask_broadcastb_epi8(__m256i __O, __mmask32 __M, __m128i __A)
   // CHECK: select <32 x i1> %{{.*}}, <32 x i8> %{{.*}}, <32 x i8> %{{.*}}
   return _mm256_mask_broadcastb_epi8(__O, __M, __A);
 }
+TEST_CONSTEXPR(match_v32qi(_mm256_mask_broadcastb_epi8((__m256i)(__v32qs){0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63},(__mmask32)0xAAAAAAAA,(__m128i)(__v16qs){-120,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15}),0,-120,2,-120,4,-120,6,-120,8,-120,10,-120,12,-120,14,-120,48,-120,50,-120,52,-120,54,-120,56,-120,58,-120,60,-120,62,-120));
 
 __m256i test_mm256_maskz_broadcastb_epi8(__mmask32 __M, __m128i __A) {
   // CHECK-LABEL: test_mm256_maskz_broadcastb_epi8
@@ -3173,6 +3259,7 @@ __m256i test_mm256_maskz_broadcastb_epi8(__mmask32 __M, __m128i __A) {
   // CHECK: select <32 x i1> %{{.*}}, <32 x i8> %{{.*}}, <32 x i8> %{{.*}}
   return _mm256_maskz_broadcastb_epi8(__M, __A);
 }
+TEST_CONSTEXPR(match_v32qi(_mm256_maskz_broadcastb_epi8((__mmask32)0xAAAAAAAA,(__m128i)(__v16qs){-120,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15}),0,-120,0,-120,0,-120,0,-120,0,-120,0,-120,0,-120,0,-120,0,-120,0,-120,0,-120,0,-120,0,-120,0,-120,0,-120,0,-120));
 
 __m128i test_mm_mask_broadcastw_epi16(__m128i __O, __mmask8 __M, __m128i __A) {
   // CHECK-LABEL: test_mm_mask_broadcastw_epi16
@@ -3180,6 +3267,7 @@ __m128i test_mm_mask_broadcastw_epi16(__m128i __O, __mmask8 __M, __m128i __A) {
   // CHECK: select <8 x i1> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}
   return _mm_mask_broadcastw_epi16(__O, __M, __A);
 }
+TEST_CONSTEXPR(match_v8hi(_mm_mask_broadcastw_epi16((__m128i)(__v8hi){0,1,2,3,4,5,6,7},(__mmask8)0xAA,(__m128i)(__v8hi){-120,1,2,3,4,5,6,7}),0,-120,2,-120,4,-120,6,-120));
 
 __m128i test_mm_maskz_broadcastw_epi16(__mmask8 __M, __m128i __A) {
   // CHECK-LABEL: test_mm_maskz_broadcastw_epi16
@@ -3187,6 +3275,7 @@ __m128i test_mm_maskz_broadcastw_epi16(__mmask8 __M, __m128i __A) {
   // CHECK: select <8 x i1> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}
   return _mm_maskz_broadcastw_epi16(__M, __A);
 }
+TEST_CONSTEXPR(match_v8hi(_mm_maskz_broadcastw_epi16((__mmask8)0xAA,(__m128i)(__v8hi){-120,1,2,3,4,5,6,7}),0,-120,0,-120,0,-120,0,-120));
 
 __m256i test_mm256_mask_broadcastw_epi16(__m256i __O, __mmask16 __M, __m128i __A) {
   // CHECK-LABEL: test_mm256_mask_broadcastw_epi16
@@ -3194,6 +3283,7 @@ __m256i test_mm256_mask_broadcastw_epi16(__m256i __O, __mmask16 __M, __m128i __A
   // CHECK: select <16 x i1> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}}
   return _mm256_mask_broadcastw_epi16(__O, __M, __A);
 }
+TEST_CONSTEXPR(match_v16hi(_mm256_mask_broadcastw_epi16((__m256i)(__v16hi){0,1,2,3,4,5,6,7,24,25,26,27,28,29,30,31},(__mmask16)0xAAAA,(__m128i)(__v8hi){-120,1,2,3,4,5,6,7}),0,-120,2,-120,4,-120,6,-120,24,-120,26,-120,28,-120,30,-120));
 
 __m256i test_mm256_maskz_broadcastw_epi16(__mmask16 __M, __m128i __A) {
   // CHECK-LABEL: test_mm256_maskz_broadcastw_epi16
@@ -3201,6 +3291,8 @@ __m256i test_mm256_maskz_broadcastw_epi16(__mmask16 __M, __m128i __A) {
   // CHECK: select <16 x i1> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}}
   return _mm256_maskz_broadcastw_epi16(__M, __A);
 }
+TEST_CONSTEXPR(match_v16hi(_mm256_maskz_broadcastw_epi16((__mmask16)0xAAAA,(__m128i)(__v8hi){-120,1,2,3,4,5,6,7}),0,-120,0,-120,0,-120,0,-120,0,-120,0,-120,0,-120,0,-120));
+
 __m128i test_mm_mask_set1_epi8 (__m128i __O, __mmask16 __M, char __A){
   // CHECK-LABEL: test_mm_mask_set1_epi8
   // CHECK: insertelement <16 x i8> poison, i8 %{{.*}}, i32 0
@@ -3222,6 +3314,8 @@ __m128i test_mm_mask_set1_epi8 (__m128i __O, __mmask16 __M, char __A){
   // CHECK: select <16 x i1> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}
   return _mm_mask_set1_epi8(__O, __M, __A);
 }
+TEST_CONSTEXPR(match_v16qi(_mm_mask_set1_epi8((__m128i)(__v16qi){1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16},(__mmask16)0xAAAA,(char)42),1,42,3,42,5,42,7,42,9,42,11,42,13,42,15,42));
+
 __m128i test_mm_maskz_set1_epi8 ( __mmask16 __M, char __A){
   // CHECK-LABEL: test_mm_maskz_set1_epi8
   // CHECK: insertelement <16 x i8> poison, i8 %{{.*}}, i32 0
@@ -3243,6 +3337,7 @@ __m128i test_mm_maskz_set1_epi8 ( __mmask16 __M, char __A){
   // CHECK: select <16 x i1> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}
   return _mm_maskz_set1_epi8( __M, __A);
 }
+TEST_CONSTEXPR(match_v16qi(_mm_maskz_set1_epi8((__mmask16)0xAAAA,(char)42),0,42,0,42,0,42,0,42,0,42,0,42,0,42,0,42));
 
 __m256i test_mm256_mask_set1_epi8(__m256i __O, __mmask32 __M, char __A) {
   // CHECK-LABEL: test_mm256_mask_set1_epi8
@@ -3281,6 +3376,7 @@ __m256i test_mm256_mask_set1_epi8(__m256i __O, __mmask32 __M, char __A) {
   // CHECK: select <32 x i1> %{{.*}}, <32 x i8> %{{.*}}, <32 x i8> %{{.*}}
   return _mm256_mask_set1_epi8(__O, __M, __A);
 }
+TEST_CONSTEXPR(match_v32qi(_mm256_mask_set1_epi8((__m256i)(__v32qi){1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64},(__mmask32)0xAAAAAAAA,(char)42),1,42,3,42,5,42,7,42,9,42,11,42,13,42,15,42,49,42,51,42,53,42,55,42,57,42,59,42,61,42,63,42));
 
 __m256i test_mm256_maskz_set1_epi8( __mmask32 __M, char __A) {
   // CHECK-LABEL: test_mm256_maskz_set1_epi8
@@ -3319,7 +3415,7 @@ __m256i test_mm256_maskz_set1_epi8( __mmask32 __M, char __A) {
   // CHECK: select <32 x i1> %{{.*}}, <32 x i8> %{{.*}}, <32 x i8> %{{.*}}
   return _mm256_maskz_set1_epi8( __M, __A);
 }
-
+TEST_CONSTEXPR(match_v32qi(_mm256_maskz_set1_epi8((__mmask32)0xAAAAAAAA,(char)42),0,42,0,42,0,42,0,42,0,42,0,42,0,42,0,42,0,42,0,42,0,42,0,42,0,42,0,42,0,42,0,42));
 
 __m256i test_mm256_mask_set1_epi16(__m256i __O, __mmask16 __M, short __A) {
   // CHECK-LABEL: test_mm256_mask_set1_epi16
@@ -3342,6 +3438,7 @@ __m256i test_mm256_mask_set1_epi16(__m256i __O, __mmask16 __M, short __A) {
   // CHECK: select <16 x i1> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}}
   return _mm256_mask_set1_epi16(__O, __M, __A);
 }
+TEST_CONSTEXPR(match_v16hi(_mm256_mask_set1_epi16((__m256i)(__v16hi){1,2,3,4,5,6,7,8,25,26,27,28,29,30,31,32},(__mmask16)0xAAAA,42),1,42,3,42,5,42,7,42,25,42,27,42,29,42,31,42));
 
 __m256i test_mm256_maskz_set1_epi16(__mmask16 __M, short __A) {
   // CHECK-LABEL: test_mm256_maskz_set1_epi16
@@ -3364,6 +3461,7 @@ __m256i test_mm256_maskz_set1_epi16(__mmask16 __M, short __A) {
   // CHECK: select <16 x i1> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}}
   return _mm256_maskz_set1_epi16(__M, __A);
 }
+TEST_CONSTEXPR(match_v16hi(_mm256_maskz_set1_epi16((__mmask16)0xAAAA,42),0,42,0,42,0,42,0,42,0,42,0,42,0,42,0,42));
 
 __m128i test_mm_mask_set1_epi16(__m128i __O, __mmask8 __M, short __A) {
   // CHECK-LABEL: test_mm_mask_set1_epi16
@@ -3378,6 +3476,7 @@ __m128i test_mm_mask_set1_epi16(__m128i __O, __mmask8 __M, short __A) {
select <8 x i1> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}} return _mm_mask_set1_epi16(__O, __M, __A); } +TEST_CONSTEXPR(match_v8hi(_mm_mask_set1_epi16((__m128i)(__v8hi){1,2,3,4,5,6,7,8},(__mmask8)0xAA,42),1,42,3,42,5,42,7,42)); __m128i test_mm_maskz_set1_epi16(__mmask8 __M, short __A) { // CHECK-LABEL: test_mm_maskz_set1_epi16 @@ -3392,6 +3491,8 @@ __m128i test_mm_maskz_set1_epi16(__mmask8 __M, short __A) { // CHECK: select <8 x i1> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}} return _mm_maskz_set1_epi16(__M, __A); } +TEST_CONSTEXPR(match_v8hi(_mm_maskz_set1_epi16((__mmask8)0xAA,42),0,42,0,42,0,42,0,42)); + __m128i test_mm_permutexvar_epi16(__m128i __A, __m128i __B) { // CHECK-LABEL: test_mm_permutexvar_epi16 // CHECK: @llvm.x86.avx512.permvar.hi.128 diff --git a/clang/test/DebugInfo/CXX/decl-member-call.cpp b/clang/test/DebugInfo/CXX/decl-member-call.cpp new file mode 100644 index 0000000000000..95758a2985c0c --- /dev/null +++ b/clang/test/DebugInfo/CXX/decl-member-call.cpp @@ -0,0 +1,25 @@ +// RUN: %clang_cc1 -O1 -triple x86_64-unknown_unknown -emit-llvm \ +// RUN: -debug-info-kind=standalone -dwarf-version=5 %s -o - | FileCheck %s + +// Ensure both nonmember and member calls to declared functions +// have attached `DISubprogram`s. + +int nonmember(int n); + +struct S { + int x; + int member(int n); +}; + +int main(int argc, char** argv) { + struct S s = {}; + int a = s.member(argc); + int b = nonmember(argc); + return a + b; +} + +// CHECK: declare !dbg ![[SP1:[0-9]+]] noundef i32 @_ZN1S6memberEi( +// CHECK: declare !dbg ![[SP2:[0-9]+]] noundef i32 @_Z9nonmemberi( + +// CHECK: ![[SP1]] = !DISubprogram(name: "member", linkageName: "_ZN1S6memberEi" +// CHECK: ![[SP2]] = !DISubprogram(name: "nonmember", linkageName: "_Z9nonmemberi" diff --git a/clang/test/Sema/PR166843.cpp b/clang/test/Sema/PR166843.cpp new file mode 100644 index 0000000000000..5a6223bccc27e --- /dev/null +++ b/clang/test/Sema/PR166843.cpp @@ -0,0 +1,7 @@ +// RUN: %clang_cc1 -fsyntax-only %s -verify +namespace a { +template +void c() { + ((::c::x)); // expected-error {{'c' is not a class, namespace, or enumeration}} +} +} diff --git a/clang/tools/offload-arch/AMDGPUArchByHIP.cpp b/clang/tools/offload-arch/AMDGPUArchByHIP.cpp index 11cff4f5ecdbe..ff39a85d15628 100644 --- a/clang/tools/offload-arch/AMDGPUArchByHIP.cpp +++ b/clang/tools/offload-arch/AMDGPUArchByHIP.cpp @@ -98,8 +98,16 @@ static std::vector getSearchPaths() { // Custom comparison function for dll name static bool compareVersions(StringRef A, StringRef B) { auto ParseVersion = [](StringRef S) -> VersionTuple { - size_t Pos = S.find_last_of('_'); - StringRef VerStr = (Pos == StringRef::npos) ?
S : S.substr(Pos + 1); + StringRef Filename = sys::path::filename(S); + size_t Pos = Filename.find_last_of('_'); + if (Pos == StringRef::npos) + return VersionTuple(); + + StringRef VerStr = Filename.substr(Pos + 1); + size_t DotPos = VerStr.find('.'); + if (DotPos != StringRef::npos) + VerStr = VerStr.substr(0, DotPos); + VersionTuple Vt; (void)Vt.tryParse(VerStr); return Vt; @@ -135,8 +143,6 @@ static std::pair findNewestHIPDLL() { Filename.ends_with(HipDLLSuffix)) DLLNames.push_back(sys::path::convert_to_slash(DirIt->path())); } - if (!DLLNames.empty()) - break; } if (DLLNames.empty()) diff --git a/clang/unittests/AST/ASTImporterTest.cpp b/clang/unittests/AST/ASTImporterTest.cpp index 4c7ea5e338a13..3cab4c600b1b1 100644 --- a/clang/unittests/AST/ASTImporterTest.cpp +++ b/clang/unittests/AST/ASTImporterTest.cpp @@ -3300,6 +3300,72 @@ TEST_P(ImportExpr, ConceptNestedNonInstantiationDependentRequirement) { conceptDecl(has(requiresExpr(has(requiresExprBodyDecl()))))); } +TEST_P(ImportExpr, ImportSubstNonTypeTemplateParmPackExpr) { + MatchVerifier Verifier; + const char *Code = R"( + template struct X {}; + template struct Z {}; + + template struct E { + template using B = Z...>; + template E(B); + }; + using declToImport = E<1, 3>; + )"; + testImport(Code, Lang_CXX20, "", Lang_CXX20, Verifier, + typedefNameDecl(hasName("declToImport"))); +} + +TEST_P(ImportExpr, ImportCXXParenListInitExpr) { + MatchVerifier Verifier; + const char *Code = R"( + struct Node { + int val; + double d; + }; + Node* declToImport() { return new Node(2, 3.14); } + )"; + testImport(Code, Lang_CXX20, "", Lang_CXX20, Verifier, + functionDecl(hasName("declToImport"))); +} + +TEST_P(ImportExpr, ImportPseudoObjectExpr) { + MatchVerifier Verifier; + const char *Code = R"( + namespace std { + struct strong_ordering { + int n; + constexpr operator int() const { return n; } + static const strong_ordering less, equal, greater; + }; + constexpr strong_ordering strong_ordering::less{-1}, + strong_ordering::equal{0}, strong_ordering::greater{1}; + } + + struct A { + std::strong_ordering operator<=>(const A&) const; + }; + struct B { + bool operator==(const B&) const; + bool operator<(const B&) const; + }; + + template struct Cmp : T { + std::strong_ordering operator<=>(const Cmp&) const = default; + }; + + void use(...); + void declToImport() { + use( + Cmp() <=> Cmp(), + Cmp() <=> Cmp() + ); + } + )"; + testImport(Code, Lang_CXX20, "", Lang_CXX20, Verifier, + functionDecl(hasName("declToImport"))); +} + class ImportImplicitMethods : public ASTImporterOptionSpecificTestBase { public: static constexpr auto DefaultCode = R"( diff --git a/clang/www/c_status.html b/clang/www/c_status.html index 8220bc433d815..6e25ad7df01ce 100644 --- a/clang/www/c_status.html +++ b/clang/www/c_status.html @@ -324,7 +324,7 @@

C2y implementation status

Matching of Multi-Dimensional Arrays in Generic Selection Expressions
N3348 - Unknown + No The __COUNTER__ predefined macro diff --git a/flang/include/flang/Lower/DirectivesCommon.h b/flang/include/flang/Lower/DirectivesCommon.h index 707c8f88e00d9..029d70ec01bc5 100644 --- a/flang/include/flang/Lower/DirectivesCommon.h +++ b/flang/include/flang/Lower/DirectivesCommon.h @@ -515,11 +515,19 @@ fir::factory::AddrAndBoundsInfo gatherDataOperandAddrAndBounds( } bool dataExvIsAssumedSize = Fortran::semantics::IsAssumedSizeArray(symRef->get().GetUltimate()); - if (genDefaultBounds && - mlir::isa(fir::unwrapRefType(info.addr.getType()))) + if (genDefaultBounds && mlir::isa( + fir::unwrapRefType(info.addr.getType()))) { bounds = fir::factory::genBaseBoundsOps( builder, operandLocation, dataExv, dataExvIsAssumedSize, strideIncludeLowerExtent); + } + if (genDefaultBounds && (fir::characterWithDynamicLen( + fir::unwrapRefType(info.addr.getType())) || + mlir::isa( + fir::unwrapRefType(info.addr.getType())))) { + bounds = {fir::factory::genBoundsOpFromBoxChar( + builder, operandLocation, dataExv, info)}; + } asFortran << symRef->get().name().ToString(); } else { // Unsupported llvm::report_fatal_error("Unsupported type of OpenACC operand"); diff --git a/flang/lib/Optimizer/OpenMP/MapInfoFinalization.cpp b/flang/lib/Optimizer/OpenMP/MapInfoFinalization.cpp index 1c76fb0057b82..ee4b56d5c7d5f 100644 --- a/flang/lib/Optimizer/OpenMP/MapInfoFinalization.cpp +++ b/flang/lib/Optimizer/OpenMP/MapInfoFinalization.cpp @@ -973,38 +973,6 @@ class MapInfoFinalizationPass deferrableDesc.clear(); expandedBaseAddr.clear(); - // First, walk `omp.map.info` ops to see if any of them have varPtrs - // with an underlying type of fir.char, i.e a character - // with dynamic length. If so, check if they need bounds added. - func->walk([&](mlir::omp::MapInfoOp op) { - if (!op.getBounds().empty()) - return; - - mlir::Value varPtr = op.getVarPtr(); - mlir::Type underlyingVarType = fir::unwrapRefType(varPtr.getType()); - - if (!fir::characterWithDynamicLen(underlyingVarType)) - return; - - fir::factory::AddrAndBoundsInfo info = - fir::factory::getDataOperandBaseAddr( - builder, varPtr, /*isOptional=*/false, varPtr.getLoc()); - - fir::ExtendedValue extendedValue = - hlfir::translateToExtendedValue(varPtr.getLoc(), builder, - hlfir::Entity{info.addr}, - /*continguousHint=*/true) - .first; - builder.setInsertionPoint(op); - llvm::SmallVector boundsOps = - fir::factory::genImplicitBoundsOps( - builder, info, extendedValue, - /*dataExvIsAssumedSize=*/false, varPtr.getLoc()); - - op.getBoundsMutable().append(boundsOps); - }); - // Next, walk `omp.map.info` ops to see if any record members should be // implicitly mapped.
// TODO/FIXME/UPDATE: I believe we need to add implicit capture of diff --git a/flang/lib/Optimizer/Transforms/CUFComputeSharedMemoryOffsetsAndSize.cpp b/flang/lib/Optimizer/Transforms/CUFComputeSharedMemoryOffsetsAndSize.cpp index 09126e047d382..a64494510d847 100644 --- a/flang/lib/Optimizer/Transforms/CUFComputeSharedMemoryOffsetsAndSize.cpp +++ b/flang/lib/Optimizer/Transforms/CUFComputeSharedMemoryOffsetsAndSize.cpp @@ -41,8 +41,7 @@ namespace { static bool isAssumedSize(mlir::ValueRange shape) { if (shape.size() != 1) return false; - std::optional val = fir::getIntIfConstant(shape[0]); - if (val && *val == -1) + if (llvm::isa_and_nonnull(shape[0].getDefiningOp())) return true; return false; } diff --git a/flang/lib/Semantics/check-omp-atomic.cpp b/flang/lib/Semantics/check-omp-atomic.cpp index 2707921ca1dfa..ec03e6fe2d920 100644 --- a/flang/lib/Semantics/check-omp-atomic.cpp +++ b/flang/lib/Semantics/check-omp-atomic.cpp @@ -590,9 +590,11 @@ void OmpStructureChecker::CheckAtomicVariable( CheckAtomicType(syms.back(), source, atom.AsFortran(), checkTypeOnPointer); - if (IsAllocatable(syms.back()) && !IsArrayElement(atom)) { - context_.Say(source, "Atomic variable %s cannot be ALLOCATABLE"_err_en_US, - atom.AsFortran()); + if (!IsArrayElement(atom) && !ExtractComplexPart(atom)) { + if (IsAllocatable(syms.back())) { + context_.Say(source, "Atomic variable %s cannot be ALLOCATABLE"_err_en_US, + atom.AsFortran()); + } } } diff --git a/flang/test/Analysis/AliasAnalysis/modref-call-globals.f90 b/flang/test/Analysis/AliasAnalysis/modref-call-globals.f90 index 695b38ed406a5..fd1d37d18ae15 100644 --- a/flang/test/Analysis/AliasAnalysis/modref-call-globals.f90 +++ b/flang/test/Analysis/AliasAnalysis/modref-call-globals.f90 @@ -75,7 +75,7 @@ subroutine internal subroutine test_common implicit none real :: test_var_x_common - common /comm/ test_var_x_common + common /comm/ test_var_x_common call test_effect_external() end subroutine ! 
CHECK-LABEL: Testing : "_QPtest_common" diff --git a/flang/test/Evaluate/folding12.f90 b/flang/test/Evaluate/folding12.f90 index 016e692f66264..1a0a8cb064c4c 100644 --- a/flang/test/Evaluate/folding12.f90 +++ b/flang/test/Evaluate/folding12.f90 @@ -5,7 +5,7 @@ module m1 integer :: parent_field end type parent_type type, extends(parent_type) :: child_type - integer :: child_field + integer :: child_field end type child_type type parent_array_type integer, dimension(2) :: parent_field @@ -21,7 +21,7 @@ module m1 type(child_type), parameter :: child_const2 = child_type(12, 13) type(child_type), parameter :: array_var(2) = & [child_type(14, 15), child_type(16, 17)] - logical, parameter :: test_array_child = array_var(2)%child_field == 17 + logical, parameter :: test_array_child = array_var(2)%child_field == 17 logical, parameter :: test_array_parent = array_var(2)%parent_field == 16 type array_type @@ -40,7 +40,7 @@ module m1 type(child_array_type), parameter, dimension(2) :: child_const5 = & [child_array_type([22, 23], 24), child_array_type([25, 26], 27)] integer, dimension(2), parameter :: int_const6 = child_const5(:)%parent_field(2) - logical, parameter :: test_child3 = int_const6(1) == 23 + logical, parameter :: test_child3 = int_const6(1) == 23 type(child_type), parameter :: child_const7 = child_type(28, 29) type(parent_type), parameter :: parent_const8 = child_const7%parent_type @@ -114,7 +114,7 @@ module m3 logical, parameter :: test_parent1 = child_const1%parent_field1 == 12 logical, parameter :: test_parent2 = child_const1%parent_field2 == 10.0 logical, parameter :: test_parent3 = child_const1%parent_field3 .eqv. .false. - logical, parameter :: test_parent4 = & + logical, parameter :: test_parent4 = & child_const1%parent_type%parent_field1 == 12 logical, parameter :: test_parent5 = & child_const1%parent_type%parent_field2 == 10.0 diff --git a/flang/test/Examples/omp-in-reduction-clause.f90 b/flang/test/Examples/omp-in-reduction-clause.f90 index ced672220fe78..73ba197d5605a 100644 --- a/flang/test/Examples/omp-in-reduction-clause.f90 +++ b/flang/test/Examples/omp-in-reduction-clause.f90 @@ -15,7 +15,7 @@ subroutine omp_in_reduction_taskgroup() do i=1,10 z = z * 5 end do - !$omp end taskloop + !$omp end taskloop !$omp end taskgroup end subroutine omp_in_reduction_taskgroup diff --git a/flang/test/Fir/CUDA/cuda-constructor-2.f90 b/flang/test/Fir/CUDA/cuda-constructor-2.f90 index 62118bb2eed2e..f21d8f9c37637 100644 --- a/flang/test/Fir/CUDA/cuda-constructor-2.f90 +++ b/flang/test/Fir/CUDA/cuda-constructor-2.f90 @@ -28,10 +28,10 @@ module attributes {dlti.dl_spec = #dlti.dl_spec<#dlti.dl_entry>, !fir.ref, !fir.ref, i64) -> () // CHECK-DAG: %[[BOX:.*]] = fir.address_of(@_QMmtestsEndev) : !fir.ref>>> // CHECK-DAG: %[[BOXREF:.*]] = fir.convert %[[BOX]] : (!fir.ref>>>) -> !fir.ref -// CHECK-DAG: fir.call @_FortranACUFRegisterVariable(%[[MODULE:.*]], %[[BOXREF]], %{{.*}}, %{{.*}}) +// CHECK-DAG: fir.call @_FortranACUFRegisterVariable(%[[MODULE:.*]], %[[BOXREF]], %{{.*}}, %{{.*}}) // -// ----- +// ----- // Checking that constant global variables are not registered @@ -40,7 +40,7 @@ module attributes {dlti.dl_spec = #dlti.dl_spec<#dlti.dl_entry : vector<2xi64>, i16 = dense<16> : vector<2xi64>, i1 = dense<8> : vector<2xi64>, !llvm.ptr = dense<64> : vector<4xi64>, f80 = dense<128> : vector<2xi64>, i128 = dense<128> : vector<2xi64>, i64 = dense<64> : vector<2xi64>, !llvm.ptr<271> = dense<32> : vector<4xi64>, !llvm.ptr<272> = dense<64> : vector<4xi64>, f128 = dense<128> : vector<2xi64>, 
!llvm.ptr<270> = dense<32> : vector<4xi64>, f16 = dense<16> : vector<2xi64>, f64 = dense<64> : vector<2xi64>, i32 = dense<32> : vector<2xi64>, "dlti.stack_alignment" = 128 : i64, "dlti.endianness" = "little">, fir.defaultkind = "a1c4d8i4l4r4", fir.kindmap = "", gpu.container_module, llvm.data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", llvm.ident = "flang version 20.0.0 (https://github.com/llvm/llvm-project.git 3372303188df0f7f8ac26e7ab610cf8b0f716d42)", llvm.target_triple = "x86_64-unknown-linux-gnu"} { fir.global @_QMiso_c_bindingECc_int constant : i32 - + fir.type_info @_QM__fortran_builtinsT__builtin_c_ptr noinit nodestroy nofinal : !fir.type<_QM__fortran_builtinsT__builtin_c_ptr{__address:i64}> gpu.module @cuda_device_mod { @@ -63,7 +63,7 @@ module attributes {dlti.dl_spec = #dlti.dl_spec : vector<2xi64>, i // ----- -module attributes {dlti.dl_spec = #dlti.dl_spec : vector<2xi64>, i16 = dense<16> : vector<2xi64>, i1 = dense<8> : vector<2xi64>, !llvm.ptr = dense<64> : vector<4xi64>, f80 = dense<128> : vector<2xi64>, i128 = dense<128> : vector<2xi64>, i64 = dense<64> : vector<2xi64>, !llvm.ptr<271> = dense<32> : vector<4xi64>, !llvm.ptr<272> = dense<64> : vector<4xi64>, f128 = dense<128> : vector<2xi64>, !llvm.ptr<270> = dense<32> : vector<4xi64>, f16 = dense<16> : vector<2xi64>, f64 = dense<64> : vector<2xi64>, i32 = dense<32> : vector<2xi64>, "dlti.stack_alignment" = 128 : i64, "dlti.endianness" = "little">, fir.defaultkind = "a1c4d8i4l4r4", fir.kindmap = "", gpu.container_module, llvm.data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", llvm.ident = "flang version 20.0.0 (https://github.com/llvm/llvm-project.git 3372303188df0f7f8ac26e7ab610cf8b0f716d42)", llvm.target_triple = "x86_64-unknown-linux-gnu"} { +module attributes {dlti.dl_spec = #dlti.dl_spec : vector<2xi64>, i16 = dense<16> : vector<2xi64>, i1 = dense<8> : vector<2xi64>, !llvm.ptr = dense<64> : vector<4xi64>, f80 = dense<128> : vector<2xi64>, i128 = dense<128> : vector<2xi64>, i64 = dense<64> : vector<2xi64>, !llvm.ptr<271> = dense<32> : vector<4xi64>, !llvm.ptr<272> = dense<64> : vector<4xi64>, f128 = dense<128> : vector<2xi64>, !llvm.ptr<270> = dense<32> : vector<4xi64>, f16 = dense<16> : vector<2xi64>, f64 = dense<64> : vector<2xi64>, i32 = dense<32> : vector<2xi64>, "dlti.stack_alignment" = 128 : i64, "dlti.endianness" = "little">, fir.defaultkind = "a1c4d8i4l4r4", fir.kindmap = "", gpu.container_module, llvm.data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", llvm.ident = "flang version 20.0.0 (https://github.com/llvm/llvm-project.git 3372303188df0f7f8ac26e7ab610cf8b0f716d42)", llvm.target_triple = "x86_64-unknown-linux-gnu"} { fir.global @_QMmEa00 {data_attr = #cuf.cuda} : !fir.box>> { %c0 = arith.constant 0 : index %0 = fir.zero_bits !fir.heap> diff --git a/flang/test/Fir/CUDA/cuda-implicit-device-global.f90 b/flang/test/Fir/CUDA/cuda-implicit-device-global.f90 index 758c2e2244257..f399767a885fa 100644 --- a/flang/test/Fir/CUDA/cuda-implicit-device-global.f90 +++ b/flang/test/Fir/CUDA/cuda-implicit-device-global.f90 @@ -144,7 +144,7 @@ // Test that global used in device function are flagged with the correct // Checking that a constant fir.global that is used in device code is copied over to the device // CHECK: fir.global linkonce @_QQclX5465737420504153534544 constant : !fir.char<1,11> -// CHECK-LABEL: gpu.module @cuda_device_mod +// CHECK-LABEL: gpu.module @cuda_device_mod // 
CHECK: fir.global linkonce @_QQclX5465737420504153534544 constant // ----- @@ -312,10 +312,10 @@ // Test that global used in device function are flagged with the correct // ----- // Variables with initialization are promoted to non constant global. -// +// // attributes(global) subroutine kernel4() // integer :: a = 4 -// end subroutine +// end subroutine func.func @_QPkernel4() attributes {cuf.proc_attr = #cuf.cuda_proc} { %0 = fir.address_of(@_QFkernel4Ea) : !fir.ref diff --git a/flang/test/Fir/CUDA/cuda-shared-offset.mlir b/flang/test/Fir/CUDA/cuda-shared-offset.mlir index 9c057d024426a..37b36b2bd050e 100644 --- a/flang/test/Fir/CUDA/cuda-shared-offset.mlir +++ b/flang/test/Fir/CUDA/cuda-shared-offset.mlir @@ -3,9 +3,9 @@ module attributes {dlti.dl_spec = #dlti.dl_spec<#dlti.dl_entry : vector<4xi64>>, #dlti.dl_entry, dense<32> : vector<4xi64>>, #dlti.dl_entry, dense<32> : vector<4xi64>>, #dlti.dl_entry : vector<2xi64>>, #dlti.dl_entry : vector<2xi64>>, #dlti.dl_entry : vector<2xi64>>, #dlti.dl_entry : vector<2xi64>>, #dlti.dl_entry : vector<2xi64>>, #dlti.dl_entry : vector<2xi64>>, #dlti.dl_entry : vector<2xi64>>, #dlti.dl_entry : vector<2xi64>>, #dlti.dl_entry, dense<64> : vector<4xi64>>, #dlti.dl_entry : vector<2xi64>>, #dlti.dl_entry : vector<2xi64>>, #dlti.dl_entry<"dlti.endianness", "little">, #dlti.dl_entry<"dlti.stack_alignment", 128 : i64>>, fir.defaultkind = "a1c4d8i4l4r4", fir.kindmap = "", gpu.container_module, llvm.data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", llvm.ident = "flang version 20.0.0 (https://github.com/llvm/llvm-project.git cae351f3453a0a26ec8eb2ddaf773c24a29d929e)", llvm.target_triple = "x86_64-unknown-linux-gnu"} { gpu.module @cuda_device_mod { gpu.func @_QPdynshared() kernel { - %c-1 = arith.constant -1 : index - %6 = cuf.shared_memory !fir.array, %c-1 : index {bindc_name = "r", uniq_name = "_QFdynsharedEr"} -> !fir.ref> - %7 = fir.shape %c-1 : (index) -> !fir.shape<1> + %0 = fir.assumed_size_extent : index + %6 = cuf.shared_memory !fir.array, %0 : index {bindc_name = "r", uniq_name = "_QFdynsharedEr"} -> !fir.ref> + %7 = fir.shape %0 : (index) -> !fir.shape<1> %8 = fir.declare %6(%7) {data_attr = #cuf.cuda, uniq_name = "_QFdynsharedEr"} : (!fir.ref>, !fir.shape<1>) -> !fir.ref> gpu.return } @@ -14,7 +14,7 @@ module attributes {dlti.dl_spec = #dlti.dl_spec<#dlti.dl_entry, %c-1 : index {bindc_name = "r", uniq_name = "_QFdynsharedEr"} -> !fir.ref> +// CHECK: %{{.*}} = cuf.shared_memory[%c0{{.*}} : i32] !fir.array, %{{.*}} : index {bindc_name = "r", uniq_name = "_QFdynsharedEr"} -> !fir.ref> // CHECK: gpu.return // CHECK: } // CHECK: fir.global external @_QPdynshared__shared_mem {alignment = 4 : i64, data_attr = #cuf.cuda} : !fir.array<0xi8> @@ -127,16 +127,16 @@ module attributes {dlti.dl_spec = #dlti.dl_spec<#dlti.dl_entry> {cuf.data_attr = #cuf.cuda, fir.bindc_name = "a"}) attributes {cuf.proc_attr = #cuf.cuda_proc} { %0 = fir.dummy_scope : !fir.dscope - %c-1 = arith.constant -1 : index - %1 = fir.shape %c-1 : (index) -> !fir.shape<1> + %a0 = fir.assumed_size_extent : index + %1 = fir.shape %a0 : (index) -> !fir.shape<1> %2:2 = hlfir.declare %arg0(%1) dummy_scope %0 {data_attr = #cuf.cuda, uniq_name = "_QMmtestsFtestanyEa"} : (!fir.ref>, !fir.shape<1>, !fir.dscope) -> (!fir.box>, !fir.ref>) %3 = fir.address_of(@_QM__fortran_builtinsE__builtin_blockdim) : !fir.ref> %4:2 = hlfir.declare %3 {uniq_name = "_QM__fortran_builtinsE__builtin_blockdim"} : (!fir.ref>) -> (!fir.ref>, !fir.ref>) %5 = 
fir.address_of(@_QM__fortran_builtinsE__builtin_blockidx) : !fir.ref> %6:2 = hlfir.declare %5 {uniq_name = "_QM__fortran_builtinsE__builtin_blockidx"} : (!fir.ref>) -> (!fir.ref>, !fir.ref>) - %c-1_0 = arith.constant -1 : index - %7 = cuf.shared_memory !fir.array, %c-1_0 : index {bindc_name = "dmasks", uniq_name = "_QMmtestsFtestanyEdmasks"} -> !fir.ref> - %8 = fir.shape %c-1_0 : (index) -> !fir.shape<1> + %a2 = fir.assumed_size_extent : index + %7 = cuf.shared_memory !fir.array, %a2 : index {bindc_name = "dmasks", uniq_name = "_QMmtestsFtestanyEdmasks"} -> !fir.ref> + %8 = fir.shape %a2 : (index) -> !fir.shape<1> %9:2 = hlfir.declare %7(%8) {data_attr = #cuf.cuda, uniq_name = "_QMmtestsFtestanyEdmasks"} : (!fir.ref>, !fir.shape<1>) -> (!fir.box>, !fir.ref>) %10 = fir.address_of(@_QM__fortran_builtinsE__builtin_griddim) : !fir.ref> %11:2 = hlfir.declare %10 {uniq_name = "_QM__fortran_builtinsE__builtin_griddim"} : (!fir.ref>) -> (!fir.ref>, !fir.ref>) @@ -146,9 +146,9 @@ module attributes {dlti.dl_spec = #dlti.dl_spec<#dlti.dl_entry) -> (!fir.ref, !fir.ref) %16 = fir.alloca i32 {bindc_name = "j", uniq_name = "_QMmtestsFtestanyEj"} %17:2 = hlfir.declare %16 {uniq_name = "_QMmtestsFtestanyEj"} : (!fir.ref) -> (!fir.ref, !fir.ref) - %c-1_1 = arith.constant -1 : index - %18 = cuf.shared_memory !fir.array, %c-1_1 : index {bindc_name = "smasks", uniq_name = "_QMmtestsFtestanyEsmasks"} -> !fir.ref> - %19 = fir.shape %c-1_1 : (index) -> !fir.shape<1> + %a3 = fir.assumed_size_extent : index + %18 = cuf.shared_memory !fir.array, %a3 : index {bindc_name = "smasks", uniq_name = "_QMmtestsFtestanyEsmasks"} -> !fir.ref> + %19 = fir.shape %a3 : (index) -> !fir.shape<1> %20:2 = hlfir.declare %18(%19) {data_attr = #cuf.cuda, uniq_name = "_QMmtestsFtestanyEsmasks"} : (!fir.ref>, !fir.shape<1>) -> (!fir.box>, !fir.ref>) gpu.return } @@ -156,7 +156,7 @@ module attributes {dlti.dl_spec = #dlti.dl_spec<#dlti.dl_entry, %c-1{{.*}} : index {bindc_name = "dmasks", uniq_name = "_QMmtestsFtestanyEdmasks"} -> !fir.ref> -// CHECK: %{{.*}} = cuf.shared_memory[%c0{{.*}} : i32] !fir.array, %c-1{{.*}} : index {bindc_name = "smasks", uniq_name = "_QMmtestsFtestanyEsmasks"} -> !fir.ref> +// CHECK: %{{.*}} = cuf.shared_memory[%c0{{.*}} : i32] !fir.array, %{{.*}} : index {bindc_name = "dmasks", uniq_name = "_QMmtestsFtestanyEdmasks"} -> !fir.ref> +// CHECK: %{{.*}} = cuf.shared_memory[%c0{{.*}} : i32] !fir.array, %{{.*}} : index {bindc_name = "smasks", uniq_name = "_QMmtestsFtestanyEsmasks"} -> !fir.ref> // CHECK: fir.global external @_QMmtestsPtestany__shared_mem {alignment = 8 : i64, data_attr = #cuf.cuda} : !fir.array<0xi8> diff --git a/flang/test/Fir/dispatch.f90 b/flang/test/Fir/dispatch.f90 index 2b1ae225986ca..5574d8586cdc0 100644 --- a/flang/test/Fir/dispatch.f90 +++ b/flang/test/Fir/dispatch.f90 @@ -296,7 +296,7 @@ program test_type_to_class ! CHECK-LABEL: _QMdispatch1Pno_pass_array_pointer ! CHECK-LABEL: _QMdispatch1Pcall_a1_proc -! Check the layout of the binding table. This is easier to do in FIR than in +! Check the layout of the binding table. This is easier to do in FIR than in ! LLVM IR. ! 
BT-LABEL: fir.type_info @_QMdispatch1Tty_kindK10K20 diff --git a/flang/test/Fir/non-trivial-procedure-binding-description.f90 b/flang/test/Fir/non-trivial-procedure-binding-description.f90 index 668928600157b..13fcfeed774cf 100644 --- a/flang/test/Fir/non-trivial-procedure-binding-description.f90 +++ b/flang/test/Fir/non-trivial-procedure-binding-description.f90 @@ -25,6 +25,6 @@ end module a program main use a - type(f) :: obj + type(f) :: obj print *, obj%foo(obj) end program diff --git a/flang/test/HLFIR/assumed_shape_with_value_keyword.f90 b/flang/test/HLFIR/assumed_shape_with_value_keyword.f90 index 0f904041b7101..b8c86dd36aeff 100644 --- a/flang/test/HLFIR/assumed_shape_with_value_keyword.f90 +++ b/flang/test/HLFIR/assumed_shape_with_value_keyword.f90 @@ -31,7 +31,7 @@ subroutine test_integer_value2(x) ! CHECK: return ! CHECK: } -subroutine test_real_value1(x) +subroutine test_real_value1(x) real, value :: x(:) call internal_call3(x) end @@ -45,7 +45,7 @@ subroutine test_real_value1(x) ! CHECK: return ! CHECK: } -subroutine test_real_value2(x) +subroutine test_real_value2(x) real, value :: x(:,:) call internal_call4(x) end diff --git a/flang/test/HLFIR/order_assignments/forall-proc-pointer-assignment-scheduling-character.f90 b/flang/test/HLFIR/order_assignments/forall-proc-pointer-assignment-scheduling-character.f90 index d2d1939890882..ff7f70bac1513 100644 --- a/flang/test/HLFIR/order_assignments/forall-proc-pointer-assignment-scheduling-character.f90 +++ b/flang/test/HLFIR/order_assignments/forall-proc-pointer-assignment-scheduling-character.f90 @@ -44,7 +44,7 @@ pure character(2) function f10() integer pure function decode(c) character(2), intent(in) :: c - decode = modulo(iachar(c(2:2))-49,10)+1 + decode = modulo(iachar(c(2:2))-49,10)+1 end function subroutine test_no_conflict(x) diff --git a/flang/test/Integration/unroll.f90 b/flang/test/Integration/unroll.f90 index f2c2ecb5cffac..63c71e1dc0078 100644 --- a/flang/test/Integration/unroll.f90 +++ b/flang/test/Integration/unroll.f90 @@ -3,7 +3,7 @@ ! CHECK-LABEL: unroll_dir subroutine unroll_dir integer :: a(10) - !dir$ unroll + !dir$ unroll ! CHECK: br i1 {{.*}}, label {{.*}}, label {{.*}} ! CHECK-NOT: !llvm.loop ! CHECK: br label {{.*}}, !llvm.loop ![[UNROLL_ENABLE_FULL_ANNO:.*]] diff --git a/flang/test/Integration/unroll_and_jam.f90 b/flang/test/Integration/unroll_and_jam.f90 index 05b3aaa04a1e0..e5e509cce15aa 100644 --- a/flang/test/Integration/unroll_and_jam.f90 +++ b/flang/test/Integration/unroll_and_jam.f90 @@ -27,7 +27,7 @@ end subroutine unroll_and_jam_dir_0 ! CHECK-LABEL: unroll_and_jam_dir_1 subroutine unroll_and_jam_dir_1 integer :: a(10) - !dir$ unroll_and_jam 1 + !dir$ unroll_and_jam 1 ! CHECK: br i1 {{.*}}, label {{.*}}, label {{.*}} ! CHECK-NOT: !llvm.loop ! CHECK: br label {{.*}}, !llvm.loop ![[ANNOTATION_DISABLE]] diff --git a/flang/test/Lower/OpenMP/atomic-update-capture-complex-part.f90 b/flang/test/Lower/OpenMP/atomic-update-capture-complex-part.f90 new file mode 100644 index 0000000000000..ee15b8805a69b --- /dev/null +++ b/flang/test/Lower/OpenMP/atomic-update-capture-complex-part.f90 @@ -0,0 +1,17 @@ +!RUN: %flang_fc1 -emit-hlfir -fopenmp -fopenmp-version=60 %s -o - | FileCheck %s + +! Check that this compiles successfully. 
+ +!CHECK: omp.atomic.capture +!CHECK: omp.atomic.read +!CHECK: omp.atomic.update +subroutine f00 + implicit none + real :: c + complex, allocatable :: x + !$omp atomic update capture + c = x%re + x%re = x%re + 1.0 + !$omp end atomic +end + diff --git a/flang/test/Lower/OpenMP/dynamic-len-char-bounds-gen.f90 b/flang/test/Lower/OpenMP/dynamic-len-char-bounds-gen.f90 new file mode 100644 index 0000000000000..88839221cc6d2 --- /dev/null +++ b/flang/test/Lower/OpenMP/dynamic-len-char-bounds-gen.f90 @@ -0,0 +1,19 @@ +! RUN: %flang_fc1 -emit-hlfir -fopenmp -o - %s 2>&1 | FileCheck %s + +subroutine TestCharLenBounds(clen) + + character(len=*) :: clen + + !$omp target map(clen) + !$omp end target +end subroutine TestCharLenBounds + +!CHECK: %[[DUMMY:.*]] = fir.dummy_scope : !fir.dscope +!CHECK: %[[UNBOX:.*]]:2 = fir.unboxchar %{{.*}} : (!fir.boxchar<1>) -> (!fir.ref>, index) +!CHECK: %[[DECLARE:.*]]:2 = hlfir.declare %[[UNBOX]]#0 typeparams %2#1 dummy_scope %[[DUMMY]] {uniq_name = "_QFtestcharlenboundsEclen"} : (!fir.ref>, index, !fir.dscope) -> (!fir.boxchar<1>, !fir.ref>) +!CHECK: %[[LB_START_IDX:.*]] = arith.constant 0 : index +!CHECK: %[[STRIDE:.*]] = arith.constant 1 : index +!CHECK: %[[EXTENT:.*]]:2 = fir.unboxchar %[[DECLARE]]#0 : (!fir.boxchar<1>) -> (!fir.ref>, index) +!CHECK: %[[UB:.*]] = arith.subi %[[EXTENT]]#1, %[[STRIDE]] : index +!CHECK: %[[BOUNDS:.*]] = omp.map.bounds lower_bound(%[[LB_START_IDX]] : index) upper_bound(%[[UB]] : index) extent(%[[EXTENT]]#1 : index) stride(%[[STRIDE]] : index) start_idx(%[[LB_START_IDX]] : index) {stride_in_bytes = true} +!CHECK: %{{.*}} = omp.map.info {{.*}} bounds(%[[BOUNDS]]) {{.*}} diff --git a/flang/test/Transforms/DoConcurrent/basic_host.f90 b/flang/test/Transforms/DoConcurrent/basic_host.f90 index 01b6524e13c36..cbe6e123597dd 100644 --- a/flang/test/Transforms/DoConcurrent/basic_host.f90 +++ b/flang/test/Transforms/DoConcurrent/basic_host.f90 @@ -6,7 +6,7 @@ ! RUN: | FileCheck %s ! RUN: bbc -emit-hlfir -fopenmp -fdo-concurrent-to-openmp=host %s -o - \ ! RUN: | FileCheck %s - + ! CHECK-LABEL: DO_CONCURRENT_BASIC program do_concurrent_basic ! CHECK: %[[ARR:.*]]:2 = hlfir.declare %{{.*}}(%{{.*}}) {uniq_name = "_QFEa"} : (!fir.ref>, !fir.shape<1>) -> (!fir.ref>, !fir.ref>) diff --git a/flang/test/Transforms/DoConcurrent/map_shape_info.f90 b/flang/test/Transforms/DoConcurrent/map_shape_info.f90 index 40f66c19718e8..95bfc236888d1 100644 --- a/flang/test/Transforms/DoConcurrent/map_shape_info.f90 +++ b/flang/test/Transforms/DoConcurrent/map_shape_info.f90 @@ -28,7 +28,7 @@ end program do_concurrent_shape ! CHECK: omp.map.info ! CHECK: omp.map.info -! CHECK: %[[DIM0_EXT_MAP:.*]] = omp.map.info +! CHECK: %[[DIM0_EXT_MAP:.*]] = omp.map.info ! CHECK-SAME: var_ptr(%[[DIM0_EXT]] : !fir.ref, index) ! CHECK-SAME: map_clauses(implicit) ! CHECK-SAME: capture(ByCopy) -> !fir.ref {name = "_QFEa.extent.dim0"} @@ -77,9 +77,9 @@ end subroutine do_concurrent_shape_shift ! CHECK: omp.map.info ! CHECK: omp.map.info -! CHECK: %[[DIM0_STRT_MAP:.*]] = omp.map.info +! CHECK: %[[DIM0_STRT_MAP:.*]] = omp.map.info ! CHECK-SAME: var_ptr(%[[DIM0_STRT]] : !fir.ref, index) -! CHECK-SAME: map_clauses(implicit) +! CHECK-SAME: map_clauses(implicit) ! CHECK-SAME: capture(ByCopy) -> !fir.ref {name = "_QF{{.*}}Ea.start_idx.dim0"} ! 
CHECK: %[[DIM0_EXT_MAP:.*]] = omp.map.info diff --git a/flang/test/Transforms/DoConcurrent/use_loop_bounds_in_body.f90 b/flang/test/Transforms/DoConcurrent/use_loop_bounds_in_body.f90 index b467747293ace..07a3b5b62b5a5 100644 --- a/flang/test/Transforms/DoConcurrent/use_loop_bounds_in_body.f90 +++ b/flang/test/Transforms/DoConcurrent/use_loop_bounds_in_body.f90 @@ -14,7 +14,7 @@ subroutine foo(a, n) do concurrent (i=1:n) a(i) = n end do -end subroutine +end subroutine ! CHECK-LABEL: func.func @_QPfoo ! CHECK: omp.target diff --git a/libcxx/docs/Status/Cxx2cPapers.csv b/libcxx/docs/Status/Cxx2cPapers.csv index e0e47b864d38f..0f4dbb882088a 100644 --- a/libcxx/docs/Status/Cxx2cPapers.csv +++ b/libcxx/docs/Status/Cxx2cPapers.csv @@ -81,7 +81,7 @@ "`P3379R0 `__","Constrain ``std::expected`` equality operators","2024-11 (Wrocław)","|Complete|","21","`#118135 `__","" "`P2862R1 `__","``text_encoding::name()`` should never return null values","2024-11 (Wrocław)","","","`#118371 `__","" "`P2897R7 `__","``aligned_accessor``: An ``mdspan`` accessor expressing pointer over-alignment","2024-11 (Wrocław)","|Complete|","21","`#118372 `__","" -"`P3355R1 `__","Fix ``submdspan`` for C++26","2024-11 (Wrocław)","","","`#118373 `__","" +"`P3355R2 `__","Fix ``submdspan`` for C++26","2024-11 (Wrocław)","","","`#118373 `__","" "`P3222R0 `__","Fix C++26 by adding transposed special cases for P2642 layouts","2024-11 (Wrocław)","","","`#118374 `__","" "`P3050R2 `__","Fix C++26 by optimizing ``linalg::conjugated`` for noncomplex value types","2024-11 (Wrocław)","","","`#118375 `__","" "`P3396R1 `__","``std::execution`` wording fixes","2024-11 (Wrocław)","","","`#118376 `__","" diff --git a/libcxx/include/CMakeLists.txt b/libcxx/include/CMakeLists.txt index 09d4552664dd7..3845ec8376794 100644 --- a/libcxx/include/CMakeLists.txt +++ b/libcxx/include/CMakeLists.txt @@ -328,6 +328,8 @@ set(files __configuration/abi.h __configuration/availability.h __configuration/compiler.h + __configuration/experimental.h + __configuration/hardening.h __configuration/language.h __configuration/platform.h __coroutine/coroutine_handle.h diff --git a/libcxx/include/__config b/libcxx/include/__config index 357f77b7d27d6..e907961446201 100644 --- a/libcxx/include/__config +++ b/libcxx/include/__config @@ -14,6 +14,8 @@ #include <__configuration/abi.h> #include <__configuration/availability.h> #include <__configuration/compiler.h> +#include <__configuration/experimental.h> +#include <__configuration/hardening.h> #include <__configuration/language.h> #include <__configuration/platform.h> @@ -38,195 +40,6 @@ # define _LIBCPP_FREESTANDING # endif -// NOLINTNEXTLINE(libcpp-cpp-version-check) -# if __cplusplus < 201103L -# define _LIBCPP_CXX03_LANG -# endif - -# if __has_feature(experimental_library) -# ifndef _LIBCPP_ENABLE_EXPERIMENTAL -# define _LIBCPP_ENABLE_EXPERIMENTAL -# endif -# endif - -// Incomplete features get their own specific disabling flags. This makes it -// easier to grep for target specific flags once the feature is complete. 
-# if defined(_LIBCPP_ENABLE_EXPERIMENTAL) || defined(_LIBCPP_BUILDING_LIBRARY) -# define _LIBCPP_HAS_EXPERIMENTAL_LIBRARY 1 -# else -# define _LIBCPP_HAS_EXPERIMENTAL_LIBRARY 0 -# endif - -# define _LIBCPP_HAS_EXPERIMENTAL_PSTL _LIBCPP_HAS_EXPERIMENTAL_LIBRARY -# define _LIBCPP_HAS_EXPERIMENTAL_TZDB _LIBCPP_HAS_EXPERIMENTAL_LIBRARY -# define _LIBCPP_HAS_EXPERIMENTAL_SYNCSTREAM _LIBCPP_HAS_EXPERIMENTAL_LIBRARY -# define _LIBCPP_HAS_EXPERIMENTAL_HARDENING_OBSERVE_SEMANTIC _LIBCPP_HAS_EXPERIMENTAL_LIBRARY - -// HARDENING { - -// TODO(LLVM 23): Remove this. We're making these an error to catch folks who might not have migrated. -// Since hardening went through several changes (many of which impacted user-facing macros), -// we're keeping these checks around for a bit longer than usual. Failure to properly configure -// hardening results in checks being dropped silently, which is a pretty big deal. -# if defined(_LIBCPP_ENABLE_ASSERTIONS) -# error "_LIBCPP_ENABLE_ASSERTIONS has been removed, please use _LIBCPP_HARDENING_MODE= instead (see docs)" -# endif -# if defined(_LIBCPP_ENABLE_HARDENED_MODE) -# error "_LIBCPP_ENABLE_HARDENED_MODE has been removed, please use _LIBCPP_HARDENING_MODE= instead (see docs)" -# endif -# if defined(_LIBCPP_ENABLE_SAFE_MODE) -# error "_LIBCPP_ENABLE_SAFE_MODE has been removed, please use _LIBCPP_HARDENING_MODE= instead (see docs)" -# endif -# if defined(_LIBCPP_ENABLE_DEBUG_MODE) -# error "_LIBCPP_ENABLE_DEBUG_MODE has been removed, please use _LIBCPP_HARDENING_MODE= instead (see docs)" -# endif - -// The library provides the macro `_LIBCPP_HARDENING_MODE` which can be set to one of the following values: -// -// - `_LIBCPP_HARDENING_MODE_NONE`; -// - `_LIBCPP_HARDENING_MODE_FAST`; -// - `_LIBCPP_HARDENING_MODE_EXTENSIVE`; -// - `_LIBCPP_HARDENING_MODE_DEBUG`. -// -// These values have the following effects: -// -// - `_LIBCPP_HARDENING_MODE_NONE` -- sets the hardening mode to "none" which disables all runtime hardening checks; -// -// - `_LIBCPP_HARDENING_MODE_FAST` -- sets that hardening mode to "fast". The fast mode enables security-critical checks -// that can be done with relatively little runtime overhead in constant time; -// -// - `_LIBCPP_HARDENING_MODE_EXTENSIVE` -- sets the hardening mode to "extensive". The extensive mode is a superset of -// the fast mode that additionally enables checks that are relatively cheap and prevent common types of logic errors -// but are not necessarily security-critical; -// -// - `_LIBCPP_HARDENING_MODE_DEBUG` -- sets the hardening mode to "debug". The debug mode is a superset of the extensive -// mode and enables all checks available in the library, including internal assertions. Checks that are part of the -// debug mode can be very expensive and thus the debug mode is intended to be used for testing, not in production. - -// Inside the library, assertions are categorized so they can be cherry-picked based on the chosen hardening mode. These -// macros are only for internal use -- users should only pick one of the high-level hardening modes described above. -// -// - `_LIBCPP_ASSERT_VALID_INPUT_RANGE` -- checks that ranges (whether expressed as an iterator pair, an iterator and -// a sentinel, an iterator and a count, or a `std::range`) given as input to library functions are valid: -// - the sentinel is reachable from the begin iterator; -// - TODO(hardening): both iterators refer to the same container. 
-// -// - `_LIBCPP_ASSERT_VALID_ELEMENT_ACCESS` -- checks that any attempts to access a container element, whether through -// the container object or through an iterator, are valid and do not attempt to go out of bounds or otherwise access -// a non-existent element. For iterator checks to work, bounded iterators must be enabled in the ABI. Types like -// `optional` and `function` are considered one-element containers for the purposes of this check. -// -// - `_LIBCPP_ASSERT_NON_NULL` -- checks that the pointer being dereferenced is not null. On most modern platforms zero -// address does not refer to an actual location in memory, so a null pointer dereference would not compromize the -// memory security of a program (however, it is still undefined behavior that can result in strange errors due to -// compiler optimizations). -// -// - `_LIBCPP_ASSERT_NON_OVERLAPPING_RANGES` -- for functions that take several ranges as arguments, checks that the -// given ranges do not overlap. -// -// - `_LIBCPP_ASSERT_VALID_DEALLOCATION` -- checks that an attempt to deallocate memory is valid (e.g. the given object -// was allocated by the given allocator). Violating this category typically results in a memory leak. -// -// - `_LIBCPP_ASSERT_VALID_EXTERNAL_API_CALL` -- checks that a call to an external API doesn't fail in -// an unexpected manner. This includes triggering documented cases of undefined behavior in an external library (like -// attempting to unlock an unlocked mutex in pthreads). Any API external to the library falls under this category -// (from system calls to compiler intrinsics). We generally don't expect these failures to compromize memory safety or -// otherwise create an immediate security issue. -// -// - `_LIBCPP_ASSERT_COMPATIBLE_ALLOCATOR` -- checks any operations that exchange nodes between containers to make sure -// the containers have compatible allocators. -// -// - `_LIBCPP_ASSERT_ARGUMENT_WITHIN_DOMAIN` -- checks that the given argument is within the domain of valid arguments -// for the function. Violating this typically produces an incorrect result (e.g. the clamp algorithm returns the -// original value without clamping it due to incorrect functors) or puts an object into an invalid state (e.g. -// a string view where only a subset of elements is possible to access). This category is for assertions violating -// which doesn't cause any immediate issues in the library -- whatever the consequences are, they will happen in the -// user code. -// -// - `_LIBCPP_ASSERT_PEDANTIC` -- checks prerequisites which are imposed by the Standard, but violating which happens to -// be benign in our implementation. -// -// - `_LIBCPP_ASSERT_SEMANTIC_REQUIREMENT` -- checks that the given argument satisfies the semantic requirements imposed -// by the Standard. Typically, there is no simple way to completely prove that a semantic requirement is satisfied; -// thus, this would often be a heuristic check and it might be quite expensive. -// -// - `_LIBCPP_ASSERT_INTERNAL` -- checks that internal invariants of the library hold. These assertions don't depend on -// user input. -// -// - `_LIBCPP_ASSERT_UNCATEGORIZED` -- for assertions that haven't been properly classified yet. - -// clang-format off -# define _LIBCPP_HARDENING_MODE_NONE (1 << 1) -# define _LIBCPP_HARDENING_MODE_FAST (1 << 2) -# define _LIBCPP_HARDENING_MODE_EXTENSIVE (1 << 4) // Deliberately not ordered. 
-# define _LIBCPP_HARDENING_MODE_DEBUG (1 << 3) -// clang-format on - -# ifndef _LIBCPP_HARDENING_MODE - -# ifndef _LIBCPP_HARDENING_MODE_DEFAULT -# error _LIBCPP_HARDENING_MODE_DEFAULT is not defined. This definition should be set at configuration time in the \ -`__config_site` header, please make sure your installation of libc++ is not broken. -# endif - -# define _LIBCPP_HARDENING_MODE _LIBCPP_HARDENING_MODE_DEFAULT -# endif - -# if _LIBCPP_HARDENING_MODE != _LIBCPP_HARDENING_MODE_NONE && \ - _LIBCPP_HARDENING_MODE != _LIBCPP_HARDENING_MODE_FAST && \ - _LIBCPP_HARDENING_MODE != _LIBCPP_HARDENING_MODE_EXTENSIVE && \ - _LIBCPP_HARDENING_MODE != _LIBCPP_HARDENING_MODE_DEBUG -# error _LIBCPP_HARDENING_MODE must be set to one of the following values: \ -_LIBCPP_HARDENING_MODE_NONE, \ -_LIBCPP_HARDENING_MODE_FAST, \ -_LIBCPP_HARDENING_MODE_EXTENSIVE, \ -_LIBCPP_HARDENING_MODE_DEBUG -# endif - -// Hardening assertion semantics generally mirror the evaluation semantics of C++26 Contracts: -// - `ignore` evaluates the assertion but doesn't do anything if it fails (note that it differs from the Contracts -// `ignore` semantic which wouldn't evaluate the assertion at all); -// - `observe` logs an error (indicating, if possible, that the error is fatal) and continues execution; -// - `quick-enforce` terminates the program as fast as possible (via trapping); -// - `enforce` logs an error and then terminates the program. -// -// Notes: -// - Continuing execution after a hardening check fails results in undefined behavior; the `observe` semantic is meant -// to make adopting hardening easier but should not be used outside of this scenario; -// - C++26 wording for Library Hardening precludes a conforming Hardened implementation from using the Contracts -// `ignore` semantic when evaluating hardened preconditions in the Library. Libc++ allows using this semantic for -// hardened preconditions, however, be aware that using `ignore` does not produce a conforming "Hardened" -// implementation, unlike the other semantics above. -// clang-format off -# define _LIBCPP_ASSERTION_SEMANTIC_IGNORE (1 << 1) -# define _LIBCPP_ASSERTION_SEMANTIC_OBSERVE (1 << 2) -# define _LIBCPP_ASSERTION_SEMANTIC_QUICK_ENFORCE (1 << 3) -# define _LIBCPP_ASSERTION_SEMANTIC_ENFORCE (1 << 4) -// clang-format on - -// Allow users to define an arbitrary assertion semantic; otherwise, use the default mapping from modes to semantics. -// The default is for production-capable modes to use `quick-enforce` (i.e., trap) and for the `debug` mode to use -// `enforce` (i.e., log and abort). -# ifndef _LIBCPP_ASSERTION_SEMANTIC - -# if _LIBCPP_HARDENING_MODE == _LIBCPP_HARDENING_MODE_DEBUG -# define _LIBCPP_ASSERTION_SEMANTIC _LIBCPP_ASSERTION_SEMANTIC_ENFORCE -# else -# define _LIBCPP_ASSERTION_SEMANTIC _LIBCPP_ASSERTION_SEMANTIC_QUICK_ENFORCE -# endif - -# else -# if !_LIBCPP_HAS_EXPERIMENTAL_LIBRARY -# error "Assertion semantics are an experimental feature." -# endif -# if defined(_LIBCPP_CXX03_LANG) -# error "Assertion semantics are not available in the C++03 mode." 
-# endif - -# endif // _LIBCPP_ASSERTION_SEMANTIC - -// } HARDENING - # define _LIBCPP_TOSTRING2(x) #x # define _LIBCPP_TOSTRING(x) _LIBCPP_TOSTRING2(x) diff --git a/libcxx/include/__configuration/experimental.h b/libcxx/include/__configuration/experimental.h new file mode 100644 index 0000000000000..d14df3e5175f3 --- /dev/null +++ b/libcxx/include/__configuration/experimental.h @@ -0,0 +1,37 @@ +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef _LIBCPP___CONFIGURATION_EXPERIMENTAL_H +#define _LIBCPP___CONFIGURATION_EXPERIMENTAL_H + +#include <__config_site> + +#ifndef _LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER +# pragma GCC system_header +#endif + +#if __has_feature(experimental_library) +# ifndef _LIBCPP_ENABLE_EXPERIMENTAL +# define _LIBCPP_ENABLE_EXPERIMENTAL +# endif +#endif + +// Incomplete features get their own specific disabling flags. This makes it +// easier to grep for target specific flags once the feature is complete. +#if defined(_LIBCPP_ENABLE_EXPERIMENTAL) || defined(_LIBCPP_BUILDING_LIBRARY) +# define _LIBCPP_HAS_EXPERIMENTAL_LIBRARY 1 +#else +# define _LIBCPP_HAS_EXPERIMENTAL_LIBRARY 0 +#endif + +#define _LIBCPP_HAS_EXPERIMENTAL_PSTL _LIBCPP_HAS_EXPERIMENTAL_LIBRARY +#define _LIBCPP_HAS_EXPERIMENTAL_TZDB _LIBCPP_HAS_EXPERIMENTAL_LIBRARY +#define _LIBCPP_HAS_EXPERIMENTAL_SYNCSTREAM _LIBCPP_HAS_EXPERIMENTAL_LIBRARY +#define _LIBCPP_HAS_EXPERIMENTAL_HARDENING_OBSERVE_SEMANTIC _LIBCPP_HAS_EXPERIMENTAL_LIBRARY + +#endif // _LIBCPP___CONFIGURATION_EXPERIMENTAL_H diff --git a/libcxx/include/__configuration/hardening.h b/libcxx/include/__configuration/hardening.h new file mode 100644 index 0000000000000..bc2a8d078fa77 --- /dev/null +++ b/libcxx/include/__configuration/hardening.h @@ -0,0 +1,181 @@ +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef _LIBCPP___CONFIGURATION_HARDENING_H +#define _LIBCPP___CONFIGURATION_HARDENING_H + +#include <__config_site> +#include <__configuration/experimental.h> +#include <__configuration/language.h> + +#ifndef _LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER +# pragma GCC system_header +#endif + +// TODO(LLVM 23): Remove this. We're making these an error to catch folks who might not have migrated. +// Since hardening went through several changes (many of which impacted user-facing macros), +// we're keeping these checks around for a bit longer than usual. Failure to properly configure +// hardening results in checks being dropped silently, which is a pretty big deal. 
+#if defined(_LIBCPP_ENABLE_ASSERTIONS) +# error "_LIBCPP_ENABLE_ASSERTIONS has been removed, please use _LIBCPP_HARDENING_MODE= instead (see docs)" +#endif +#if defined(_LIBCPP_ENABLE_HARDENED_MODE) +# error "_LIBCPP_ENABLE_HARDENED_MODE has been removed, please use _LIBCPP_HARDENING_MODE= instead (see docs)" +#endif +#if defined(_LIBCPP_ENABLE_SAFE_MODE) +# error "_LIBCPP_ENABLE_SAFE_MODE has been removed, please use _LIBCPP_HARDENING_MODE= instead (see docs)" +#endif +#if defined(_LIBCPP_ENABLE_DEBUG_MODE) +# error "_LIBCPP_ENABLE_DEBUG_MODE has been removed, please use _LIBCPP_HARDENING_MODE= instead (see docs)" +#endif + +// The library provides the macro `_LIBCPP_HARDENING_MODE` which can be set to one of the following values: +// +// - `_LIBCPP_HARDENING_MODE_NONE`; +// - `_LIBCPP_HARDENING_MODE_FAST`; +// - `_LIBCPP_HARDENING_MODE_EXTENSIVE`; +// - `_LIBCPP_HARDENING_MODE_DEBUG`. +// +// These values have the following effects: +// +// - `_LIBCPP_HARDENING_MODE_NONE` -- sets the hardening mode to "none" which disables all runtime hardening checks; +// +// - `_LIBCPP_HARDENING_MODE_FAST` -- sets the hardening mode to "fast". The fast mode enables security-critical checks +// that can be done with relatively little runtime overhead in constant time; +// +// - `_LIBCPP_HARDENING_MODE_EXTENSIVE` -- sets the hardening mode to "extensive". The extensive mode is a superset of +// the fast mode that additionally enables checks that are relatively cheap and prevent common types of logic errors +// but are not necessarily security-critical; +// +// - `_LIBCPP_HARDENING_MODE_DEBUG` -- sets the hardening mode to "debug". The debug mode is a superset of the extensive +// mode and enables all checks available in the library, including internal assertions. Checks that are part of the +// debug mode can be very expensive and thus the debug mode is intended to be used for testing, not in production. + +// Inside the library, assertions are categorized so they can be cherry-picked based on the chosen hardening mode. These +// macros are only for internal use -- users should only pick one of the high-level hardening modes described above. +// +// - `_LIBCPP_ASSERT_VALID_INPUT_RANGE` -- checks that ranges (whether expressed as an iterator pair, an iterator and +// a sentinel, an iterator and a count, or a `std::range`) given as input to library functions are valid: +// - the sentinel is reachable from the begin iterator; +// - TODO(hardening): both iterators refer to the same container. +// +// - `_LIBCPP_ASSERT_VALID_ELEMENT_ACCESS` -- checks that any attempts to access a container element, whether through +// the container object or through an iterator, are valid and do not attempt to go out of bounds or otherwise access +// a non-existent element. For iterator checks to work, bounded iterators must be enabled in the ABI. Types like +// `optional` and `function` are considered one-element containers for the purposes of this check. +// +// - `_LIBCPP_ASSERT_NON_NULL` -- checks that the pointer being dereferenced is not null. On most modern platforms zero +// address does not refer to an actual location in memory, so a null pointer dereference would not compromise the +// memory security of a program (however, it is still undefined behavior that can result in strange errors due to +// compiler optimizations). +// +// - `_LIBCPP_ASSERT_NON_OVERLAPPING_RANGES` -- for functions that take several ranges as arguments, checks that the +// given ranges do not overlap.
+// +// - `_LIBCPP_ASSERT_VALID_DEALLOCATION` -- checks that an attempt to deallocate memory is valid (e.g. the given object +// was allocated by the given allocator). Violating this category typically results in a memory leak. +// +// - `_LIBCPP_ASSERT_VALID_EXTERNAL_API_CALL` -- checks that a call to an external API doesn't fail in +// an unexpected manner. This includes triggering documented cases of undefined behavior in an external library (like +// attempting to unlock an unlocked mutex in pthreads). Any API external to the library falls under this category +// (from system calls to compiler intrinsics). We generally don't expect these failures to compromise memory safety or +// otherwise create an immediate security issue. +// +// - `_LIBCPP_ASSERT_COMPATIBLE_ALLOCATOR` -- checks any operations that exchange nodes between containers to make sure +// the containers have compatible allocators. +// +// - `_LIBCPP_ASSERT_ARGUMENT_WITHIN_DOMAIN` -- checks that the given argument is within the domain of valid arguments +// for the function. Violating this typically produces an incorrect result (e.g. the clamp algorithm returns the +// original value without clamping it due to incorrect functors) or puts an object into an invalid state (e.g. +// a string view where only a subset of elements is possible to access). This category is for assertions violating +// which doesn't cause any immediate issues in the library -- whatever the consequences are, they will happen in the +// user code. +// +// - `_LIBCPP_ASSERT_PEDANTIC` -- checks prerequisites which are imposed by the Standard, but violating which happens to +// be benign in our implementation. +// +// - `_LIBCPP_ASSERT_SEMANTIC_REQUIREMENT` -- checks that the given argument satisfies the semantic requirements imposed +// by the Standard. Typically, there is no simple way to completely prove that a semantic requirement is satisfied; +// thus, this would often be a heuristic check and it might be quite expensive. +// +// - `_LIBCPP_ASSERT_INTERNAL` -- checks that internal invariants of the library hold. These assertions don't depend on +// user input. +// +// - `_LIBCPP_ASSERT_UNCATEGORIZED` -- for assertions that haven't been properly classified yet. + +// clang-format off +# define _LIBCPP_HARDENING_MODE_NONE (1 << 1) +# define _LIBCPP_HARDENING_MODE_FAST (1 << 2) +# define _LIBCPP_HARDENING_MODE_EXTENSIVE (1 << 4) // Deliberately not ordered. +# define _LIBCPP_HARDENING_MODE_DEBUG (1 << 3) +// clang-format on + +#ifndef _LIBCPP_HARDENING_MODE + +# ifndef _LIBCPP_HARDENING_MODE_DEFAULT +# error _LIBCPP_HARDENING_MODE_DEFAULT is not defined. This definition should be set at configuration time in the \ +`__config_site` header, please make sure your installation of libc++ is not broken.
+# endif + +# define _LIBCPP_HARDENING_MODE _LIBCPP_HARDENING_MODE_DEFAULT +#endif + +#if _LIBCPP_HARDENING_MODE != _LIBCPP_HARDENING_MODE_NONE && _LIBCPP_HARDENING_MODE != _LIBCPP_HARDENING_MODE_FAST && \ + _LIBCPP_HARDENING_MODE != _LIBCPP_HARDENING_MODE_EXTENSIVE && \ + _LIBCPP_HARDENING_MODE != _LIBCPP_HARDENING_MODE_DEBUG +# error _LIBCPP_HARDENING_MODE must be set to one of the following values: \ +_LIBCPP_HARDENING_MODE_NONE, \ +_LIBCPP_HARDENING_MODE_FAST, \ +_LIBCPP_HARDENING_MODE_EXTENSIVE, \ +_LIBCPP_HARDENING_MODE_DEBUG +#endif + +// Hardening assertion semantics generally mirror the evaluation semantics of C++26 Contracts: +// - `ignore` evaluates the assertion but doesn't do anything if it fails (note that it differs from the Contracts +// `ignore` semantic which wouldn't evaluate the assertion at all); +// - `observe` logs an error (indicating, if possible, that the error is fatal) and continues execution; +// - `quick-enforce` terminates the program as fast as possible (via trapping); +// - `enforce` logs an error and then terminates the program. +// +// Notes: +// - Continuing execution after a hardening check fails results in undefined behavior; the `observe` semantic is meant +// to make adopting hardening easier but should not be used outside of this scenario; +// - C++26 wording for Library Hardening precludes a conforming Hardened implementation from using the Contracts +// `ignore` semantic when evaluating hardened preconditions in the Library. Libc++ allows using this semantic for +// hardened preconditions, however, be aware that using `ignore` does not produce a conforming "Hardened" +// implementation, unlike the other semantics above. +// clang-format off +# define _LIBCPP_ASSERTION_SEMANTIC_IGNORE (1 << 1) +# define _LIBCPP_ASSERTION_SEMANTIC_OBSERVE (1 << 2) +# define _LIBCPP_ASSERTION_SEMANTIC_QUICK_ENFORCE (1 << 3) +# define _LIBCPP_ASSERTION_SEMANTIC_ENFORCE (1 << 4) +// clang-format on + +// Allow users to define an arbitrary assertion semantic; otherwise, use the default mapping from modes to semantics. +// The default is for production-capable modes to use `quick-enforce` (i.e., trap) and for the `debug` mode to use +// `enforce` (i.e., log and abort). +#ifndef _LIBCPP_ASSERTION_SEMANTIC + +# if _LIBCPP_HARDENING_MODE == _LIBCPP_HARDENING_MODE_DEBUG +# define _LIBCPP_ASSERTION_SEMANTIC _LIBCPP_ASSERTION_SEMANTIC_ENFORCE +# else +# define _LIBCPP_ASSERTION_SEMANTIC _LIBCPP_ASSERTION_SEMANTIC_QUICK_ENFORCE +# endif + +#else + +# if !_LIBCPP_HAS_EXPERIMENTAL_LIBRARY +# error "Assertion semantics are an experimental feature." +# endif +# if defined(_LIBCPP_CXX03_LANG) +# error "Assertion semantics are not available in the C++03 mode." 
+# endif + +#endif // _LIBCPP_ASSERTION_SEMANTIC + +#endif // _LIBCPP___CONFIGURATION_HARDENING_H diff --git a/libcxx/include/__configuration/language.h b/libcxx/include/__configuration/language.h index 9c224dfa76e40..26e87f87afd87 100644 --- a/libcxx/include/__configuration/language.h +++ b/libcxx/include/__configuration/language.h @@ -18,6 +18,9 @@ // NOLINTBEGIN(libcpp-cpp-version-check) #ifdef __cplusplus +# if __cplusplus < 201103L +# define _LIBCPP_CXX03_LANG +# endif # if __cplusplus <= 201103L # define _LIBCPP_STD_VER 11 # elif __cplusplus <= 201402L diff --git a/libcxx/include/__vector/vector.h b/libcxx/include/__vector/vector.h index 316d3a9d10eff..a100b1675516e 100644 --- a/libcxx/include/__vector/vector.h +++ b/libcxx/include/__vector/vector.h @@ -664,9 +664,6 @@ class vector { _LIBCPP_CONSTEXPR_SINCE_CXX20 _LIBCPP_HIDE_FROM_ABI void __construct_at_end(_InputIterator __first, _Sentinel __last, size_type __n); - _LIBCPP_CONSTEXPR_SINCE_CXX20 _LIBCPP_HIDE_FROM_ABI void __append(size_type __n); - _LIBCPP_CONSTEXPR_SINCE_CXX20 _LIBCPP_HIDE_FROM_ABI void __append(size_type __n, const_reference __x); - _LIBCPP_CONSTEXPR_SINCE_CXX20 _LIBCPP_HIDE_FROM_ABI iterator __make_iter(pointer __p) _NOEXCEPT { #ifdef _LIBCPP_ABI_BOUNDED_ITERATORS_IN_VECTOR // Bound the iterator according to the capacity, rather than the size. @@ -971,36 +968,6 @@ vector<_Tp, _Allocator>::__construct_at_end(_InputIterator __first, _Sentinel __ __tx.__pos_ = std::__uninitialized_allocator_copy(this->__alloc_, std::move(__first), std::move(__last), __tx.__pos_); } -// Default constructs __n objects starting at __end_ -// throws if construction throws -// Postcondition: size() == size() + __n -// Exception safety: strong. -template -_LIBCPP_CONSTEXPR_SINCE_CXX20 void vector<_Tp, _Allocator>::__append(size_type __n) { - if (static_cast(this->__cap_ - this->__end_) >= __n) - this->__construct_at_end(__n); - else { - __split_buffer __v(__recommend(size() + __n), size(), this->__alloc_); - __v.__construct_at_end(__n); - __swap_out_circular_buffer(__v); - } -} - -// Default constructs __n objects starting at __end_ -// throws if construction throws -// Postcondition: size() == size() + __n -// Exception safety: strong. 
-template -_LIBCPP_CONSTEXPR_SINCE_CXX20 void vector<_Tp, _Allocator>::__append(size_type __n, const_reference __x) { - if (static_cast(this->__cap_ - this->__end_) >= __n) - this->__construct_at_end(__n, __x); - else { - __split_buffer __v(__recommend(size() + __n), size(), this->__alloc_); - __v.__construct_at_end(__n, __x); - __swap_out_circular_buffer(__v); - } -} - template _LIBCPP_CONSTEXPR_SINCE_CXX20 inline _LIBCPP_HIDE_FROM_ABI vector<_Tp, _Allocator>::vector(vector&& __x) #if _LIBCPP_STD_VER >= 17 @@ -1402,21 +1369,35 @@ vector<_Tp, _Allocator>::__insert_with_size( } template -_LIBCPP_CONSTEXPR_SINCE_CXX20 void vector<_Tp, _Allocator>::resize(size_type __sz) { - size_type __cs = size(); - if (__cs < __sz) - this->__append(__sz - __cs); - else if (__cs > __sz) - this->__destruct_at_end(this->__begin_ + __sz); +_LIBCPP_CONSTEXPR_SINCE_CXX20 void vector<_Tp, _Allocator>::resize(size_type __new_size) { + size_type __current_size = size(); + if (__current_size < __new_size) { + if (__new_size <= capacity()) { + __construct_at_end(__new_size - __current_size); + } else { + __split_buffer __v(__recommend(__new_size), __current_size, __alloc_); + __v.__construct_at_end(__new_size - __current_size); + __swap_out_circular_buffer(__v); + } + } else if (__current_size > __new_size) { + this->__destruct_at_end(this->__begin_ + __new_size); + } } template -_LIBCPP_CONSTEXPR_SINCE_CXX20 void vector<_Tp, _Allocator>::resize(size_type __sz, const_reference __x) { - size_type __cs = size(); - if (__cs < __sz) - this->__append(__sz - __cs, __x); - else if (__cs > __sz) - this->__destruct_at_end(this->__begin_ + __sz); +_LIBCPP_CONSTEXPR_SINCE_CXX20 void vector<_Tp, _Allocator>::resize(size_type __new_size, const_reference __x) { + size_type __current_size = size(); + if (__current_size < __new_size) { + if (__new_size <= capacity()) + __construct_at_end(__new_size - __current_size, __x); + else { + __split_buffer __v(__recommend(__new_size), __current_size, __alloc_); + __v.__construct_at_end(__new_size - __current_size, __x); + __swap_out_circular_buffer(__v); + } + } else if (__current_size > __new_size) { + this->__destruct_at_end(this->__begin_ + __new_size); + } } template diff --git a/libcxx/include/module.modulemap.in b/libcxx/include/module.modulemap.in index 2266a1d1d4c1c..85fe88afe248c 100644 --- a/libcxx/include/module.modulemap.in +++ b/libcxx/include/module.modulemap.in @@ -6,6 +6,8 @@ module std_config [system] { textual header "__configuration/abi.h" textual header "__configuration/availability.h" textual header "__configuration/compiler.h" + textual header "__configuration/experimental.h" + textual header "__configuration/hardening.h" textual header "__configuration/language.h" textual header "__configuration/platform.h" textual header "version" diff --git a/libcxx/utils/generate_feature_test_macro_components.py b/libcxx/utils/generate_feature_test_macro_components.py index 22209f53d50d7..82a1785a0c906 100644 --- a/libcxx/utils/generate_feature_test_macro_components.py +++ b/libcxx/utils/generate_feature_test_macro_components.py @@ -14,7 +14,7 @@ ) import functools import json -from libcxx.header_information import module_c_headers, module_headers, header_restrictions, headers_not_available, libcxx_root +from libcxx.header_information import headers_not_available def get_libcxx_paths(): diff --git a/libcxxabi/include/__cxxabi_config.h b/libcxxabi/include/__cxxabi_config.h index f5101dbc9e599..e4fd845b1fb35 100644 --- a/libcxxabi/include/__cxxabi_config.h +++ 
b/libcxxabi/include/__cxxabi_config.h @@ -14,10 +14,6 @@ #define _LIBCXXABI_ARM_EHABI #endif -#if !defined(__has_attribute) -#define __has_attribute(_attribute_) 0 -#endif - #if defined(__clang__) # define _LIBCXXABI_COMPILER_CLANG # ifndef __apple_build_version__ @@ -25,10 +21,6 @@ # endif #elif defined(__GNUC__) # define _LIBCXXABI_COMPILER_GCC -#elif defined(_MSC_VER) -# define _LIBCXXABI_COMPILER_MSVC -#elif defined(__IBMCPP__) -# define _LIBCXXABI_COMPILER_IBM #endif #if defined(_WIN32) @@ -66,17 +58,7 @@ #endif #endif -#if defined(_LIBCXXABI_COMPILER_MSVC) -#define _LIBCXXABI_WEAK -#else #define _LIBCXXABI_WEAK __attribute__((__weak__)) -#endif - -#if defined(__clang__) -#define _LIBCXXABI_COMPILER_CLANG -#elif defined(__GNUC__) -#define _LIBCXXABI_COMPILER_GCC -#endif #if __has_attribute(__no_sanitize__) && defined(_LIBCXXABI_COMPILER_CLANG) #define _LIBCXXABI_NO_CFI __attribute__((__no_sanitize__("cfi"))) @@ -89,11 +71,7 @@ # define _LIBCXXABI_GUARD_ABI_ARM #endif -#if defined(_LIBCXXABI_COMPILER_CLANG) -# if !__has_feature(cxx_exceptions) -# define _LIBCXXABI_NO_EXCEPTIONS -# endif -#elif defined(_LIBCXXABI_COMPILER_GCC) && !defined(__EXCEPTIONS) +#if !defined(__cpp_exceptions) || __cpp_exceptions < 199711L # define _LIBCXXABI_NO_EXCEPTIONS #endif diff --git a/llvm/docs/SandboxIR.md b/llvm/docs/SandboxIR.md index d2b612ba95ef1..dbf488bba735c 100644 --- a/llvm/docs/SandboxIR.md +++ b/llvm/docs/SandboxIR.md @@ -8,8 +8,8 @@ Within your LLVM pass: ``` C++ // 1. Include the necessary Sandbox IR header files. -#include "llvm/SandboxIR/Context.h -#include "llvm/SandboxIR/Function.h +#include "llvm/SandboxIR/Context.h" +#include "llvm/SandboxIR/Function.h" // 2. Create a sandboxir::Context using LLVMContext `LLVMCtx`. sandboxir::Context Ctx(LLVMCtx); diff --git a/llvm/include/llvm/Analysis/InlineSizeEstimatorAnalysis.h b/llvm/include/llvm/Analysis/InlineSizeEstimatorAnalysis.h deleted file mode 100644 index b44edd370dd1c..0000000000000 --- a/llvm/include/llvm/Analysis/InlineSizeEstimatorAnalysis.h +++ /dev/null @@ -1,47 +0,0 @@ -//===- InlineSizeEstimatorAnalysis.h - ML size estimator --------*- C++ -*-===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. 
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// -// - -#ifndef LLVM_ANALYSIS_INLINESIZEESTIMATORANALYSIS_H -#define LLVM_ANALYSIS_INLINESIZEESTIMATORANALYSIS_H - -#include "llvm/IR/PassManager.h" - -namespace llvm { -class Function; - -class TFModelEvaluator; -class InlineSizeEstimatorAnalysis - : public AnalysisInfoMixin { -public: - InlineSizeEstimatorAnalysis(); - InlineSizeEstimatorAnalysis(InlineSizeEstimatorAnalysis &&); - ~InlineSizeEstimatorAnalysis(); - - static AnalysisKey Key; - using Result = std::optional; - Result run(const Function &F, FunctionAnalysisManager &FAM); - static bool isEvaluatorRequested(); - -private: - std::unique_ptr Evaluator; -}; - -class InlineSizeEstimatorAnalysisPrinterPass - : public PassInfoMixin { - raw_ostream &OS; - -public: - explicit InlineSizeEstimatorAnalysisPrinterPass(raw_ostream &OS) : OS(OS) {} - - PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM); - - static bool isRequired() { return true; } -}; -} // namespace llvm -#endif // LLVM_ANALYSIS_INLINESIZEESTIMATORANALYSIS_H diff --git a/llvm/include/llvm/Analysis/TargetLibraryInfo.h b/llvm/include/llvm/Analysis/TargetLibraryInfo.h index f783a82d800c0..78954431e81c3 100644 --- a/llvm/include/llvm/Analysis/TargetLibraryInfo.h +++ b/llvm/include/llvm/Analysis/TargetLibraryInfo.h @@ -23,6 +23,7 @@ namespace llvm { template class ArrayRef; +enum class VectorLibrary; /// Provides info so a possible vectorization of a function can be /// computed. Function 'VectorFnName' is equivalent to 'ScalarFnName' @@ -117,25 +118,6 @@ class TargetLibraryInfoImpl { const Module &M) const; public: - /// List of known vector-functions libraries. - /// - /// The vector-functions library defines, which functions are vectorizable - /// and with which factor. The library can be specified by either frontend, - /// or a commandline option, and then used by - /// addVectorizableFunctionsFromVecLib for filling up the tables of - /// vectorizable functions. - enum VectorLibrary { - NoLibrary, // Don't use any vector library. - Accelerate, // Use Accelerate framework. - DarwinLibSystemM, // Use Darwin's libsystem_m. - LIBMVEC, // GLIBC Vector Math library. - MASSV, // IBM MASS vector library. - SVML, // Intel short vector math library. - SLEEFGNUABI, // SLEEF - SIMD Library for Evaluating Elementary Functions. - ArmPL, // Arm Performance Libraries. - AMDLIBM // AMD Math Vector library. - }; - TargetLibraryInfoImpl() = delete; LLVM_ABI explicit TargetLibraryInfoImpl(const Triple &T); diff --git a/llvm/include/llvm/Bitcode/BitcodeWriter.h b/llvm/include/llvm/Bitcode/BitcodeWriter.h index 1e72e847137a3..d88e261f8c684 100644 --- a/llvm/include/llvm/Bitcode/BitcodeWriter.h +++ b/llvm/include/llvm/Bitcode/BitcodeWriter.h @@ -20,7 +20,6 @@ #include "llvm/Support/Compiler.h" #include "llvm/Support/MemoryBufferRef.h" #include -#include #include namespace llvm { diff --git a/llvm/include/llvm/CodeGen/BasicTTIImpl.h b/llvm/include/llvm/CodeGen/BasicTTIImpl.h index f58525754d7a5..1c167af4b0478 100644 --- a/llvm/include/llvm/CodeGen/BasicTTIImpl.h +++ b/llvm/include/llvm/CodeGen/BasicTTIImpl.h @@ -316,12 +316,22 @@ class BasicTTIImplBase : public TargetTransformInfoImplCRTPBase { EVT ScalarVT = VT.getScalarType(); RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL; + /// Migration flag. IsVectorCall cases directly know about the vector + /// libcall in RuntimeLibcallsInfo and shouldn't try to use + /// LibInfo->getVectorMappingInfo. 
+    bool IsVectorCall = false;
+
     switch (ICA.getID()) {
     case Intrinsic::modf:
       LC = RTLIB::getMODF(ScalarVT);
       break;
     case Intrinsic::sincospi:
-      LC = RTLIB::getSINCOSPI(ScalarVT);
+      LC = RTLIB::getSINCOSPI(VT);
+      if (LC == RTLIB::UNKNOWN_LIBCALL)
+        LC = RTLIB::getSINCOSPI(ScalarVT);
+      else if (VT.isVector())
+        IsVectorCall = true;
+
       break;
     case Intrinsic::sincos:
       LC = RTLIB::getSINCOS(ScalarVT);
@@ -345,17 +355,23 @@ class BasicTTIImplBase : public TargetTransformInfoImplCRTPBase<T> {
     LLVMContext &Ctx = RetTy->getContext();
     ElementCount VF = getVectorizedTypeVF(RetTy);
     VecDesc const *VD = nullptr;
-    for (bool Masked : {false, true}) {
-      if ((VD = LibInfo->getVectorMappingInfo(LCName, VF, Masked)))
-        break;
+
+    if (!IsVectorCall) {
+      for (bool Masked : {false, true}) {
+        if ((VD = LibInfo->getVectorMappingInfo(LCName, VF, Masked)))
+          break;
+      }
+      if (!VD)
+        return std::nullopt;
     }
-    if (!VD)
-      return std::nullopt;

     // Cost the call + mask.
     auto Cost =
         thisT()->getCallInstrCost(nullptr, RetTy, ICA.getArgTypes(), CostKind);
-    if (VD->isMasked()) {
+
+    if ((VD && VD->isMasked()) ||
+        (IsVectorCall &&
+         RTLIB::RuntimeLibcallsInfo::hasVectorMaskArgument(LibcallImpl))) {
       auto VecTy = VectorType::get(IntegerType::getInt1Ty(Ctx), VF);
       Cost += thisT()->getShuffleCost(TargetTransformInfo::SK_Broadcast, VecTy,
                                       VecTy, {}, CostKind, 0, nullptr, {});
diff --git a/llvm/include/llvm/CodeGen/MachineBasicBlock.h b/llvm/include/llvm/CodeGen/MachineBasicBlock.h
index 71739278cf513..fcf7bab09fcff 100644
--- a/llvm/include/llvm/CodeGen/MachineBasicBlock.h
+++ b/llvm/include/llvm/CodeGen/MachineBasicBlock.h
@@ -129,7 +129,7 @@ class MachineBasicBlock
     MCRegister PhysReg;
     LaneBitmask LaneMask;

-    RegisterMaskPair(MCPhysReg PhysReg, LaneBitmask LaneMask)
+    RegisterMaskPair(MCRegister PhysReg, LaneBitmask LaneMask)
         : PhysReg(PhysReg), LaneMask(LaneMask) {}

     bool operator==(const RegisterMaskPair &other) const {
diff --git a/llvm/include/llvm/CodeGen/SelectionDAG.h b/llvm/include/llvm/CodeGen/SelectionDAG.h
index 0dd4f23c6d85f..62d2f222110e4 100644
--- a/llvm/include/llvm/CodeGen/SelectionDAG.h
+++ b/llvm/include/llvm/CodeGen/SelectionDAG.h
@@ -1725,9 +1725,17 @@ class SelectionDAG {
   /// value.
   LLVM_ABI bool
   expandMultipleResultFPLibCall(RTLIB::Libcall LC, SDNode *Node,
-                                SmallVectorImpl<SDValue> &Results,
+                                SmallVectorImpl<SDValue> &Results, EVT CallType,
                                 std::optional<unsigned> CallRetResNo = {});

+  // FIXME: This should be removed, and the form using RTLIB::Libcall should be
+  // preferred. Callers should resolve the exact libcall to use for the type.
+  LLVM_ABI bool
+  expandMultipleResultFPLibCall(StringRef LibcallName, CallingConv::ID CC,
+                                SDNode *Node, SmallVectorImpl<SDValue> &Results,
+                                std::optional<unsigned> CallRetResNo = {},
+                                bool IsVectorMasked = false);
+
   /// Expand the specified \c ISD::VAARG node as the Legalize pass would.
   LLVM_ABI SDValue expandVAArg(SDNode *Node);

@@ -2072,6 +2080,10 @@
   /// We use this predicate to simplify operations downstream.
   LLVM_ABI bool SignBitIsZero(SDValue Op, unsigned Depth = 0) const;

+  /// Return true if the sign bit of Op is known to be zero, for a
+  /// floating-point value.
+  LLVM_ABI bool SignBitIsZeroFP(SDValue Op, unsigned Depth = 0) const;
+
   /// Return true if 'Op & Mask' is known to be zero. We
   /// use this predicate to simplify operations downstream. Op and Mask are
   /// known to be the same type.
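
The DAGCombiner hunk later in this patch uses `SignBitIsZeroFP` to rewrite `fcopysign` as a disjoint OR when the sign operand is a pure sign mask. A scalar sketch of the bit-level identity involved (my example, not the patch's code):

```c++
#include <bit>
#include <cstdint>

// Precondition (what SignBitIsZeroFP establishes): the sign bit of `mag` is
// known zero. Precondition on `sgn`: every bit except the sign bit is known
// zero. Then copysign(mag, sgn) is an OR of operands sharing no set bits.
double copysign_as_disjoint_or(double mag, double sgn) {
  uint64_t m = std::bit_cast<uint64_t>(mag);
  uint64_t s = std::bit_cast<uint64_t>(sgn);
  return std::bit_cast<double>(m | s);
}
```
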
diff --git a/llvm/include/llvm/CodeGen/TargetSubtargetInfo.h b/llvm/include/llvm/CodeGen/TargetSubtargetInfo.h
index a8c7a8aff83cf..a1a130aa27798 100644
--- a/llvm/include/llvm/CodeGen/TargetSubtargetInfo.h
+++ b/llvm/include/llvm/CodeGen/TargetSubtargetInfo.h
@@ -210,6 +210,10 @@ class LLVM_ABI TargetSubtargetInfo : public MCSubtargetInfo {
   /// can be overridden.
   virtual bool enableJoinGlobalCopies() const;

+  /// Hack to bring up the option. This should be unconditionally true; all
+  /// targets should enable it and delete this.
+  virtual bool enableTerminalRule() const { return false; }
+
   /// True if the subtarget should run a scheduler after register allocation.
   ///
   /// By default this queries the PostRAScheduling bit in the scheduling model
diff --git a/llvm/include/llvm/DebugInfo/GSYM/GsymContext.h b/llvm/include/llvm/DebugInfo/GSYM/GsymContext.h
index e3e9b2bb91e8a..f9382fa8d9577 100644
--- a/llvm/include/llvm/DebugInfo/GSYM/GsymContext.h
+++ b/llvm/include/llvm/DebugInfo/GSYM/GsymContext.h
@@ -12,7 +12,6 @@
 #include "llvm/DebugInfo/DIContext.h"
 #include
 #include
-#include

 namespace llvm {

diff --git a/llvm/include/llvm/ExecutionEngine/Orc/CompileOnDemandLayer.h b/llvm/include/llvm/ExecutionEngine/Orc/CompileOnDemandLayer.h
index be8cb927c26df..fc01afc6d8739 100644
--- a/llvm/include/llvm/ExecutionEngine/Orc/CompileOnDemandLayer.h
+++ b/llvm/include/llvm/ExecutionEngine/Orc/CompileOnDemandLayer.h
@@ -45,7 +45,6 @@
 #include
 #include
 #include
-#include
 #include

 namespace llvm {

diff --git a/llvm/include/llvm/IR/DebugInfo.h b/llvm/include/llvm/IR/DebugInfo.h
index 33e6df0ecb873..862293c9666a7 100644
--- a/llvm/include/llvm/IR/DebugInfo.h
+++ b/llvm/include/llvm/IR/DebugInfo.h
@@ -108,7 +108,7 @@ class DebugInfoFinder
   LLVM_ABI void processInstruction(const Module &M, const Instruction &I);

   /// Process a DILocalVariable.
-  LLVM_ABI void processVariable(DILocalVariable *DVI);
+  LLVM_ABI void processVariable(const DILocalVariable *DVI);
   /// Process debug info location.
   LLVM_ABI void processLocation(const Module &M, const DILocation *Loc);
   /// Process a DbgRecord.
@@ -124,7 +124,7 @@
   void processCompileUnit(DICompileUnit *CU);
   void processScope(DIScope *Scope);
   void processType(DIType *DT);
-  void processImportedEntity(DIImportedEntity *Import);
+  void processImportedEntity(const DIImportedEntity *Import);
   bool addCompileUnit(DICompileUnit *CU);
   bool addGlobalVariable(DIGlobalVariableExpression *DIG);
   bool addScope(DIScope *Scope);
diff --git a/llvm/include/llvm/IR/DebugInfoMetadata.h b/llvm/include/llvm/IR/DebugInfoMetadata.h
index 0270f9f20e48c..20fa9d15bf5ff 100644
--- a/llvm/include/llvm/IR/DebugInfoMetadata.h
+++ b/llvm/include/llvm/IR/DebugInfoMetadata.h
@@ -2568,6 +2568,39 @@ class DISubprogram : public DILocalScope {
     replaceOperandWith(7, N.get());
   }

+  /// For the given retained node of a DISubprogram, applies one of the
+  /// given functions depending on the type of the node.
+  template <typename T, typename FuncLVT, typename FuncLabelT,
+            typename FuncImportedEntityT, typename FuncUnknownT>
+  static T
+  visitRetainedNode(const Metadata *N, FuncLVT &&FuncLV, FuncLabelT &&FuncLabel,
+                    FuncImportedEntityT &&FuncIE, FuncUnknownT &&FuncUnknown) {
+    if (const auto *LV = dyn_cast<DILocalVariable>(N))
+      return FuncLV(LV);
+    if (const auto *L = dyn_cast<DILabel>(N))
+      return FuncLabel(L);
+    if (const auto *IE = dyn_cast<DIImportedEntity>(N))
+      return FuncIE(IE);
+    return FuncUnknown(N);
+  }
+
+  /// Returns the scope of the given retained node of a subprogram.
+  static const DILocalScope *getRetainedNodeScope(const MDNode *N);
+  // For use in Verifier.
+ static const DIScope *getRawRetainedNodeScope(const MDNode *N); + + /// For each retained node, applies one of the given functions depending + /// on the type of a node. + template + void forEachRetainedNode(FuncLVT &&FuncLV, FuncLabelT &&FuncLabel, + FuncImportedEntityT &&FuncIE) const { + for (MDNode *N : getRetainedNodes()) + visitRetainedNode(N, FuncLV, FuncLabel, FuncIE, + [](const Metadata *N) { + llvm_unreachable("Unexpected retained node!"); + }); + } + /// Check if this subprogram describes the given function. /// /// FIXME: Should this be looking through bitcasts? diff --git a/llvm/include/llvm/IR/MemoryModelRelaxationAnnotations.h b/llvm/include/llvm/IR/MemoryModelRelaxationAnnotations.h index 535635a9ad9b0..fcfb2db85a880 100644 --- a/llvm/include/llvm/IR/MemoryModelRelaxationAnnotations.h +++ b/llvm/include/llvm/IR/MemoryModelRelaxationAnnotations.h @@ -21,7 +21,8 @@ #include "llvm/ADT/DenseSet.h" #include "llvm/ADT/StringRef.h" #include "llvm/Support/Compiler.h" -#include // for std::pair + +#include namespace llvm { diff --git a/llvm/include/llvm/IR/RuntimeLibcalls.h b/llvm/include/llvm/IR/RuntimeLibcalls.h index c822b6530a441..0afe32a4ecc3c 100644 --- a/llvm/include/llvm/IR/RuntimeLibcalls.h +++ b/llvm/include/llvm/IR/RuntimeLibcalls.h @@ -83,16 +83,7 @@ struct RuntimeLibcallsInfo { const Triple &TT, ExceptionHandling ExceptionModel = ExceptionHandling::None, FloatABI::ABIType FloatABI = FloatABI::Default, - EABI EABIVersion = EABI::Default, StringRef ABIName = "") { - // FIXME: The ExceptionModel parameter is to handle the field in - // TargetOptions. This interface fails to distinguish the forced disable - // case for targets which support exceptions by default. This should - // probably be a module flag and removed from TargetOptions. - if (ExceptionModel == ExceptionHandling::None) - ExceptionModel = TT.getDefaultExceptionHandling(); - - initLibcalls(TT, ExceptionModel, FloatABI, EABIVersion, ABIName); - } + EABI EABIVersion = EABI::Default, StringRef ABIName = ""); explicit RuntimeLibcallsInfo(const Module &M); @@ -170,6 +161,10 @@ struct RuntimeLibcallsInfo { getFunctionTy(LLVMContext &Ctx, const Triple &TT, const DataLayout &DL, RTLIB::LibcallImpl LibcallImpl) const; + /// Returns true if the function has a vector mask argument, which is assumed + /// to be the last argument. 
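
Since `hasVectorMaskArgument` treats the mask as the trailing parameter, the masked entry points registered earlier presumably have a prototype of this shape. A sketch under that assumption (ArmPL-style `_x` variant; the exact parameter order is my inference, not stated by the patch, and it compiles only for SVE-enabled AArch64):

```c++
#include <arm_sve.h>

// Assumed shape of a masked vector sincos (cf. armpl_svsincos_f64_x above):
// lanes where `pg` is false are not computed; the predicate comes last,
// which is the convention hasVectorMaskArgument relies on.
extern "C" void armpl_svsincos_f64_x(svfloat64_t x, double *sinp,
                                     double *cosp, svbool_t pg);
```

And a hypothetical caller of the `DISubprogram` visitor helpers added above, just to show the intended shape (the lambdas and their bodies are invented for illustration):

```c++
#include "llvm/IR/DebugInfoMetadata.h"

// Counts one kind of retained node; real processing would go in the lambda
// bodies. `SP` is assumed to be a valid DISubprogram.
static unsigned countRetainedVariables(const llvm::DISubprogram *SP) {
  unsigned N = 0;
  SP->forEachRetainedNode(
      [&](const llvm::DILocalVariable *) { ++N; }, // variables
      [](const llvm::DILabel *) {},                // labels
      [](const llvm::DIImportedEntity *) {});      // imported entities
  return N;
}
```
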
+ static bool hasVectorMaskArgument(RTLIB::LibcallImpl Impl); + private: LLVM_ABI static iota_range lookupLibcallImplNameImpl(StringRef Name); diff --git a/llvm/include/llvm/IR/RuntimeLibcalls.td b/llvm/include/llvm/IR/RuntimeLibcalls.td index 24c1b035d0dda..ee80606ed0dbf 100644 --- a/llvm/include/llvm/IR/RuntimeLibcalls.td +++ b/llvm/include/llvm/IR/RuntimeLibcalls.td @@ -182,6 +182,11 @@ foreach FPTy = ["F32", "F64", "F80", "F128", "PPCF128"] in { def MODF_#FPTy : RuntimeLibcall; } +foreach VecTy = ["V4F32", "V2F64", "NXV4F32", "NXV2F64"] in { + def SINCOS_#VecTy : RuntimeLibcall; + def SINCOSPI_#VecTy : RuntimeLibcall; +} + def FEGETENV : RuntimeLibcall; def FESETENV : RuntimeLibcall; @@ -971,10 +976,6 @@ def frexpf : RuntimeLibcallImpl; def frexp : RuntimeLibcallImpl; defm frexpl : LibmLongDoubleLibCall; -def sincospif : RuntimeLibcallImpl; -def sincospi : RuntimeLibcallImpl; -defm sincospil : LibmLongDoubleLibCall; - def modff : RuntimeLibcallImpl; def modf : RuntimeLibcallImpl; defm modfl : LibmLongDoubleLibCall; @@ -1051,6 +1052,15 @@ def sincosf : RuntimeLibcallImpl; def sincos : RuntimeLibcallImpl; defm sincosl : LibmLongDoubleLibCall; +// Exists in sun math library +def sincospif : RuntimeLibcallImpl; +def sincospi : RuntimeLibcallImpl; +defm sincospil : LibmLongDoubleLibCall; + +// Exists on macOS +def __sincospif : RuntimeLibcallImpl; +def __sincospi : RuntimeLibcallImpl; + def bzero : RuntimeLibcallImpl; def __bzero : RuntimeLibcallImpl; @@ -1078,6 +1088,40 @@ def __security_check_cookie : RuntimeLibcallImpl; def __security_check_cookie_arm64ec : RuntimeLibcallImpl; +//===----------------------------------------------------------------------===// +// sleef calls +//===----------------------------------------------------------------------===// + +defset list SleefLibcalls = { + def _ZGVnN2vl8l8_sincos : RuntimeLibcallImpl; + def _ZGVnN4vl4l4_sincosf : RuntimeLibcallImpl; + def _ZGVsNxvl8l8_sincos : RuntimeLibcallImpl; + def _ZGVsNxvl4l4_sincosf : RuntimeLibcallImpl; + + def _ZGVnN4vl4l4_sincospif : RuntimeLibcallImpl; + def _ZGVnN2vl8l8_sincospi : RuntimeLibcallImpl; + def _ZGVsNxvl4l4_sincospif : RuntimeLibcallImpl; + def _ZGVsNxvl8l8_sincospi : RuntimeLibcallImpl; +} + +//===----------------------------------------------------------------------===// +// ARMPL calls +//===----------------------------------------------------------------------===// + +defset list ARMPLLibcalls = { + def armpl_vsincosq_f64 + : RuntimeLibcallImpl; // CallingConv::AArch64_VectorCall + def armpl_vsincosq_f32 + : RuntimeLibcallImpl; // CallingConv::AArch64_VectorCall + def armpl_svsincos_f64_x : RuntimeLibcallImpl; + def armpl_svsincos_f32_x : RuntimeLibcallImpl; + + def armpl_vsincospiq_f32 : RuntimeLibcallImpl; + def armpl_vsincospiq_f64 : RuntimeLibcallImpl; + def armpl_svsincospi_f32_x : RuntimeLibcallImpl; + def armpl_svsincospi_f64_x : RuntimeLibcallImpl; +} + //===----------------------------------------------------------------------===// // F128 libm Runtime Libcalls //===----------------------------------------------------------------------===// @@ -1206,7 +1250,9 @@ defvar DefaultLibcallImpls32 = (add DefaultRuntimeLibcallImpls); defvar DefaultLibcallImpls64 = (add DefaultRuntimeLibcallImpls, Int128RTLibcalls); -defvar DarwinSinCosStret = LibcallImpls<(add __sincosf_stret, __sincos_stret), +// TODO: Guessing sincospi added at same time as sincos_stret +defvar DarwinSinCosStret = LibcallImpls<(add __sincosf_stret, __sincos_stret, + __sincospif, __sincospi), darwinHasSinCosStret>; defvar 
DarwinExp10 = LibcallImpls<(add __exp10f, __exp10), darwinHasExp10>;
@@ -2333,7 +2379,7 @@ defset list PPCRuntimeLibcalls = {
 defset list PPC64AIXCallList = {
   def ___memcmp64 : RuntimeLibcallImpl;
-  def ___memmove64 : RuntimeLibcallImpl;
+  def ___memmove64 : RuntimeLibcallImpl;
   def ___memset64 : RuntimeLibcallImpl;
   def ___bzero64 : RuntimeLibcallImpl;
   def ___strlen64 : RuntimeLibcallImpl;
diff --git a/llvm/include/llvm/IR/SystemLibraries.h b/llvm/include/llvm/IR/SystemLibraries.h
new file mode 100644
index 0000000000000..1713b07c1c86f
--- /dev/null
+++ b/llvm/include/llvm/IR/SystemLibraries.h
@@ -0,0 +1,39 @@
+//===------------------------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_SYSTEMLIBRARIES_H
+#define LLVM_IR_SYSTEMLIBRARIES_H
+
+namespace llvm {
+/// List of known vector-functions libraries.
+///
+/// The vector-functions library defines which functions are vectorizable
+/// and with which factor. The library can be specified by either the
+/// frontend or a command-line option, and is then used by
+/// addVectorizableFunctionsFromVecLib to fill in the tables of
+/// vectorizable functions.
+enum class VectorLibrary {
+  NoLibrary,        // Don't use any vector library.
+  Accelerate,       // Use Accelerate framework.
+  DarwinLibSystemM, // Use Darwin's libsystem_m.
+  LIBMVEC,          // GLIBC Vector Math library.
+  MASSV,            // IBM MASS vector library.
+  SVML,             // Intel short vector math library.
+  SLEEFGNUABI,      // SLEEF - SIMD Library for Evaluating Elementary Functions.
+  ArmPL,            // Arm Performance Libraries.
+  AMDLIBM           // AMD Math Vector library.
+}; + +/// Command line flag value for the vector math library to use +/// +/// FIXME: This should come from a module flag, and not be mutually exclusive +extern VectorLibrary ClVectorLibrary; + +} // namespace llvm + +#endif // LLVM_IR_SYSTEMLIBRARIES_H diff --git a/llvm/include/llvm/MC/DXContainerPSVInfo.h b/llvm/include/llvm/MC/DXContainerPSVInfo.h index 3a2d2949d0223..eb6d9e14d92c3 100644 --- a/llvm/include/llvm/MC/DXContainerPSVInfo.h +++ b/llvm/include/llvm/MC/DXContainerPSVInfo.h @@ -17,7 +17,6 @@ #include "llvm/TargetParser/Triple.h" #include -#include #include namespace llvm { diff --git a/llvm/include/llvm/MC/MCAssembler.h b/llvm/include/llvm/MC/MCAssembler.h index 152b81e284c1a..dbae271a1c198 100644 --- a/llvm/include/llvm/MC/MCAssembler.h +++ b/llvm/include/llvm/MC/MCAssembler.h @@ -24,7 +24,6 @@ #include #include #include -#include #include namespace llvm { diff --git a/llvm/include/llvm/Remarks/YAMLRemarkSerializer.h b/llvm/include/llvm/Remarks/YAMLRemarkSerializer.h index 69b8f9f000e1d..af9d809833023 100644 --- a/llvm/include/llvm/Remarks/YAMLRemarkSerializer.h +++ b/llvm/include/llvm/Remarks/YAMLRemarkSerializer.h @@ -16,7 +16,6 @@ #include "llvm/Remarks/RemarkSerializer.h" #include "llvm/Support/Compiler.h" #include "llvm/Support/YAMLTraits.h" -#include namespace llvm { namespace remarks { diff --git a/llvm/include/llvm/Support/FormatVariadic.h b/llvm/include/llvm/Support/FormatVariadic.h index 85652924491ba..fdd448f7b5a3a 100644 --- a/llvm/include/llvm/Support/FormatVariadic.h +++ b/llvm/include/llvm/Support/FormatVariadic.h @@ -37,7 +37,6 @@ #include "llvm/Support/raw_ostream.h" #include #include -#include #include #include #include diff --git a/llvm/include/llvm/Support/Jobserver.h b/llvm/include/llvm/Support/Jobserver.h index 6bee3b5671d55..3c0c04537735d 100644 --- a/llvm/include/llvm/Support/Jobserver.h +++ b/llvm/include/llvm/Support/Jobserver.h @@ -68,7 +68,6 @@ #include "llvm/ADT/StringRef.h" #include -#include namespace llvm { diff --git a/llvm/include/llvm/Transforms/Scalar/JumpThreading.h b/llvm/include/llvm/Transforms/Scalar/JumpThreading.h index a03a38466b27b..1a19eb94e60ea 100644 --- a/llvm/include/llvm/Transforms/Scalar/JumpThreading.h +++ b/llvm/include/llvm/Transforms/Scalar/JumpThreading.h @@ -24,7 +24,6 @@ #include "llvm/IR/ValueHandle.h" #include "llvm/Support/Compiler.h" #include "llvm/Transforms/Utils/ValueMapper.h" -#include #include namespace llvm { diff --git a/llvm/include/llvm/Transforms/Scalar/Scalarizer.h b/llvm/include/llvm/Transforms/Scalar/Scalarizer.h index 12513c2a704f2..35c9adbe17677 100644 --- a/llvm/include/llvm/Transforms/Scalar/Scalarizer.h +++ b/llvm/include/llvm/Transforms/Scalar/Scalarizer.h @@ -20,7 +20,6 @@ #include "llvm/IR/PassManager.h" #include "llvm/Support/Compiler.h" -#include namespace llvm { diff --git a/llvm/include/llvm/Transforms/Utils/LowerVectorIntrinsics.h b/llvm/include/llvm/Transforms/Utils/LowerVectorIntrinsics.h index cb48bb01e178a..19b573d6546a0 100644 --- a/llvm/include/llvm/Transforms/Utils/LowerVectorIntrinsics.h +++ b/llvm/include/llvm/Transforms/Utils/LowerVectorIntrinsics.h @@ -14,7 +14,6 @@ #define LLVM_TRANSFORMS_UTILS_LOWERVECTORINTRINSICS_H #include -#include namespace llvm { diff --git a/llvm/include/llvm/Transforms/Utils/SplitModuleByCategory.h b/llvm/include/llvm/Transforms/Utils/SplitModuleByCategory.h index cfcd1611e27fe..47aa2ff5930b0 100644 --- a/llvm/include/llvm/Transforms/Utils/SplitModuleByCategory.h +++ b/llvm/include/llvm/Transforms/Utils/SplitModuleByCategory.h @@ -16,7 +16,6 @@ 
#include #include -#include namespace llvm { diff --git a/llvm/lib/Analysis/CMakeLists.txt b/llvm/lib/Analysis/CMakeLists.txt index 88ebd65ec46af..bff9b62d98e06 100644 --- a/llvm/lib/Analysis/CMakeLists.txt +++ b/llvm/lib/Analysis/CMakeLists.txt @@ -89,7 +89,6 @@ add_llvm_component_library(LLVMAnalysis InlineCost.cpp InlineAdvisor.cpp InlineOrder.cpp - InlineSizeEstimatorAnalysis.cpp InstCount.cpp InstructionPrecedenceTracking.cpp InstructionSimplify.cpp diff --git a/llvm/lib/Analysis/DXILResource.cpp b/llvm/lib/Analysis/DXILResource.cpp index 27114e0705a1d..033f516abe017 100644 --- a/llvm/lib/Analysis/DXILResource.cpp +++ b/llvm/lib/Analysis/DXILResource.cpp @@ -23,7 +23,6 @@ #include "llvm/Support/DXILABI.h" #include "llvm/Support/FormatVariadic.h" #include -#include #define DEBUG_TYPE "dxil-resource" diff --git a/llvm/lib/Analysis/DevelopmentModeInlineAdvisor.cpp b/llvm/lib/Analysis/DevelopmentModeInlineAdvisor.cpp index 67e38ab8b35aa..d2be805a6f7a5 100644 --- a/llvm/lib/Analysis/DevelopmentModeInlineAdvisor.cpp +++ b/llvm/lib/Analysis/DevelopmentModeInlineAdvisor.cpp @@ -16,7 +16,6 @@ #include "llvm/ADT/BitVector.h" #include "llvm/Analysis/CallGraph.h" -#include "llvm/Analysis/InlineSizeEstimatorAnalysis.h" #include "llvm/Analysis/MLInlineAdvisor.h" #include "llvm/Analysis/ModelUnderTrainingRunner.h" #include "llvm/Analysis/NoInferenceModelRunner.h" @@ -89,9 +88,6 @@ struct InlineEvent { /// error, even if AdvisedDecision were true, otherwise it agrees with /// AdvisedDecision. bool Effect = false; - - /// What the change in size was: size_after - size_before - int64_t Reward = 0; }; /// Collect data we may use for training a model. @@ -150,31 +146,15 @@ class DevelopmentModeMLInlineAdvisor : public MLInlineAdvisor { GetModelRunner, std::function GetDefaultAdvice); - size_t getTotalSizeEstimate(); - - void updateNativeSizeEstimate(int64_t Change) { - *CurrentNativeSize += Change; - } - void resetNativeSize(Function *F) { - PreservedAnalyses PA = PreservedAnalyses::all(); - PA.abandon(); - FAM.invalidate(*F, PA); - } - std::unique_ptr getAdviceFromModel(CallBase &CB, OptimizationRemarkEmitter &ORE) override; - std::optional getNativeSizeEstimate(const Function &F) const; - private: bool isLogging() const { return !!Logger; } std::unique_ptr getMandatoryAdviceImpl(CallBase &CB) override; const bool IsDoingInference; std::unique_ptr Logger; - - const std::optional InitialNativeSize; - std::optional CurrentNativeSize; }; /// A variant of MLInlineAdvice that tracks all non-trivial inlining @@ -183,13 +163,9 @@ class LoggingMLInlineAdvice : public MLInlineAdvice { public: LoggingMLInlineAdvice(DevelopmentModeMLInlineAdvisor *Advisor, CallBase &CB, OptimizationRemarkEmitter &ORE, bool Recommendation, - TrainingLogger &Logger, - std::optional CallerSizeEstimateBefore, - std::optional CalleeSizeEstimateBefore, - bool DefaultDecision, bool Mandatory = false) + TrainingLogger &Logger, bool DefaultDecision, + bool Mandatory = false) : MLInlineAdvice(Advisor, CB, ORE, Recommendation), Logger(Logger), - CallerSizeEstimateBefore(CallerSizeEstimateBefore), - CalleeSizeEstimateBefore(CalleeSizeEstimateBefore), DefaultDecision(DefaultDecision), Mandatory(Mandatory) {} virtual ~LoggingMLInlineAdvice() = default; @@ -200,59 +176,35 @@ class LoggingMLInlineAdvice : public MLInlineAdvice { } void recordInliningImpl() override { MLInlineAdvice::recordInliningImpl(); - getAdvisor()->resetNativeSize(Caller); - int Reward = std::numeric_limits::max(); - if (InlineSizeEstimatorAnalysis::isEvaluatorRequested() && 
- !getAdvisor()->isForcedToStop()) { - int NativeSizeAfter = *getAdvisor()->getNativeSizeEstimate(*Caller) + - *CalleeSizeEstimateBefore; - Reward = NativeSizeAfter - - (*CallerSizeEstimateBefore + *CalleeSizeEstimateBefore); - getAdvisor()->updateNativeSizeEstimate(Reward); - } - log(Reward, /*Success=*/true); + log(/*Success=*/true); } void recordInliningWithCalleeDeletedImpl() override { MLInlineAdvice::recordInliningWithCalleeDeletedImpl(); - getAdvisor()->resetNativeSize(Caller); - if (InlineSizeEstimatorAnalysis::isEvaluatorRequested() && - !getAdvisor()->isForcedToStop()) { - int NativeSizeAfter = *getAdvisor()->getNativeSizeEstimate(*Caller); - int Reward = NativeSizeAfter - - (*CallerSizeEstimateBefore + *CalleeSizeEstimateBefore); - getAdvisor()->updateNativeSizeEstimate(Reward); - log(Reward, /*Success=*/true); - } else { - log(NoReward, /*Success=*/true); - } + log(/*Success=*/true); } void recordUnsuccessfulInliningImpl(const InlineResult &Result) override { MLInlineAdvice::recordUnsuccessfulInliningImpl(Result); - log(NoReward, /*Success=*/false); + log(/*Success=*/false); } void recordUnattemptedInliningImpl() override { MLInlineAdvice::recordUnattemptedInliningImpl(); - log(NoReward, /*Success=*/false); + log(/*Success=*/false); } - void log(int64_t Reward, bool Success) { + void log(bool Success) { if (Mandatory) return; InlineEvent Event; Event.AdvisedDecision = isInliningRecommended(); Event.DefaultDecision = DefaultDecision; Event.Effect = Success; - Event.Reward = Reward; Logger.logInlineEvent(Event, getAdvisor()->getModelRunner()); } - static const int64_t NoReward = 0; TrainingLogger &Logger; - const std::optional CallerSizeEstimateBefore; - const std::optional CalleeSizeEstimateBefore; const int64_t DefaultDecision; const int64_t Mandatory; }; @@ -296,9 +248,9 @@ TrainingLogger::TrainingLogger(StringRef LogFileName, if (EC) dbgs() << (EC.message() + ":" + TrainingLog); - L = std::make_unique( - std::move(OS), FT, TensorSpec::createSpec(RewardName, {1}), - InlineSizeEstimatorAnalysis::isEvaluatorRequested()); + L = std::make_unique(std::move(OS), FT, + TensorSpec::createSpec(RewardName, {1}), + false); L->switchContext(""); } @@ -326,8 +278,6 @@ void TrainingLogger::logInlineEvent(const InlineEvent &Event, L->logTensorValue(DecisionPos, reinterpret_cast(&Event.AdvisedDecision)); L->endObservation(); - if (InlineSizeEstimatorAnalysis::isEvaluatorRequested()) - L->logReward(Event.Reward); // For debugging / later use Effects.push_back(Event.Effect); @@ -340,9 +290,7 @@ DevelopmentModeMLInlineAdvisor::DevelopmentModeMLInlineAdvisor( GetModelRunner, std::function GetDefaultAdvice) : MLInlineAdvisor(M, MAM, GetModelRunner, GetDefaultAdvice), - IsDoingInference(isa(getModelRunner())), - InitialNativeSize(isLogging() ? getTotalSizeEstimate() : 0), - CurrentNativeSize(InitialNativeSize) { + IsDoingInference(isa(getModelRunner())) { // We cannot have the case of neither inference nor logging. 
if (!TrainingLog.empty()) Logger = std::make_unique( @@ -351,29 +299,12 @@ DevelopmentModeMLInlineAdvisor::DevelopmentModeMLInlineAdvisor( assert(IsDoingInference || isLogging()); } -std::optional -DevelopmentModeMLInlineAdvisor::getNativeSizeEstimate(const Function &F) const { - if (!InlineSizeEstimatorAnalysis::isEvaluatorRequested()) - return std::nullopt; - auto &R = - FAM.getResult(const_cast(F)); - if (!R) { - F.getParent()->getContext().emitError( - "Native size estimator is not present."); - return 0; - } - return *R; -} - std::unique_ptr DevelopmentModeMLInlineAdvisor::getMandatoryAdviceImpl(CallBase &CB) { return std::make_unique( /*Advisor=*/this, /*CB=*/CB, /*ORE=*/getCallerORE(CB), /*Recommendation=*/true, /*Logger=*/*Logger, - /*CallerSizeEstimateBefore=*/getNativeSizeEstimate(*CB.getCaller()), - /*CalleeSizeEstimateBefore=*/ - getNativeSizeEstimate(*CB.getCalledFunction()), /*DefaultDecision=*/true, /*Mandatory*/ true); } @@ -391,24 +322,9 @@ DevelopmentModeMLInlineAdvisor::getAdviceFromModel( /*Advisor=*/this, /*CB=*/CB, /*ORE=*/ORE, /*Recommendation=*/Recommendation, /*Logger=*/*Logger, - /*CallerSizeEstimateBefore=*/getNativeSizeEstimate(*CB.getCaller()), - /*CalleeSizeEstimateBefore=*/ - getNativeSizeEstimate(*CB.getCalledFunction()), /*DefaultDecision=*/DefaultAdvice); } -size_t DevelopmentModeMLInlineAdvisor::getTotalSizeEstimate() { - if (!InlineSizeEstimatorAnalysis::isEvaluatorRequested()) - return 0; - size_t Ret = 0; - for (auto &F : M) { - if (F.isDeclaration()) - continue; - Ret += *getNativeSizeEstimate(F); - } - return Ret; -} - std::unique_ptr llvm::getDevelopmentModeAdvisor( Module &M, ModuleAnalysisManager &MAM, std::function GetDefaultAdvice) { diff --git a/llvm/lib/Analysis/InlineSizeEstimatorAnalysis.cpp b/llvm/lib/Analysis/InlineSizeEstimatorAnalysis.cpp deleted file mode 100644 index fc635726a6aa4..0000000000000 --- a/llvm/lib/Analysis/InlineSizeEstimatorAnalysis.cpp +++ /dev/null @@ -1,281 +0,0 @@ -//===- InlineSizeEstimatorAnalysis.cpp - IR to native size from ML model --===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// -// -// This implements feature and label extraction for offline supervised learning -// of a IR to native size model. 
-// -//===----------------------------------------------------------------------===// -#include "llvm/Analysis/InlineSizeEstimatorAnalysis.h" - -#ifdef LLVM_HAVE_TFLITE -#include "llvm/Analysis/Utils/TFUtils.h" -#endif -#include "llvm/IR/Function.h" -#include "llvm/IR/PassManager.h" -#include "llvm/Support/raw_ostream.h" - -using namespace llvm; - -AnalysisKey InlineSizeEstimatorAnalysis::Key; - -#ifdef LLVM_HAVE_TFLITE -#include "llvm/Analysis/LoopInfo.h" -#include "llvm/Analysis/TargetLibraryInfo.h" -#include "llvm/Analysis/TargetTransformInfo.h" -#include "llvm/IR/BasicBlock.h" -#include "llvm/IR/Dominators.h" -#include "llvm/IR/Instructions.h" -#include "llvm/Support/Casting.h" -#include "llvm/Support/CommandLine.h" -#include -#include -#include - -static cl::opt TFIR2NativeModelPath( - "ml-inliner-ir2native-model", cl::Hidden, - cl::desc("Path to saved model evaluating native size from IR.")); - -#define DEBUG_TYPE "inline-size-estimator" -namespace { -unsigned getMaxInstructionID() { -#define LAST_OTHER_INST(NR) return NR; -#include "llvm/IR/Instruction.def" -} - -class IRToNativeSizeLearning { -public: - enum class NamedFeatureIndex : size_t { - InitialSize, - Blocks, - Calls, - IsLocal, - IsLinkOnceODR, - IsLinkOnce, - Loops, - MaxLoopDepth, - MaxDomTreeLevel, - - NumNamedFeatures - }; - static const size_t NumNamedFeatures = - static_cast(NamedFeatureIndex::NumNamedFeatures); - struct FunctionFeatures { - static const size_t FeatureCount; - - std::array NamedFeatures = {0}; - std::vector InstructionHistogram; - std::vector InstructionPairHistogram; - - void fillTensor(int32_t *Ptr) const; - int32_t &operator[](NamedFeatureIndex Pos) { - return NamedFeatures[static_cast(Pos)]; - } - }; - IRToNativeSizeLearning() = default; - - static FunctionFeatures getFunctionFeatures(Function &F, - FunctionAnalysisManager &FAM); -}; - -// This is a point in time - we determined including these pairs of -// consecutive instructions (in the IR layout available at inline time) as -// features improves the model performance. We want to move away from manual -// feature selection. -// The array is given in opcode pairs rather than labels because 1) labels -// weren't readily available, and 2) the successions were hand - extracted. -// -// This array must be sorted. 
-static const std::array, 137> - ImportantInstructionSuccessions{ - {{1, 1}, {1, 4}, {1, 5}, {1, 7}, {1, 8}, {1, 9}, {1, 11}, - {1, 12}, {1, 13}, {1, 14}, {1, 18}, {1, 20}, {1, 22}, {1, 24}, - {1, 25}, {1, 26}, {1, 27}, {1, 28}, {1, 29}, {1, 30}, {1, 31}, - {1, 32}, {1, 33}, {1, 34}, {1, 39}, {1, 40}, {1, 42}, {1, 45}, - {2, 1}, {2, 2}, {2, 13}, {2, 28}, {2, 29}, {2, 32}, {2, 33}, - {2, 34}, {2, 38}, {2, 48}, {2, 49}, {2, 53}, {2, 55}, {2, 56}, - {13, 2}, {13, 13}, {13, 26}, {13, 33}, {13, 34}, {13, 56}, {15, 27}, - {28, 2}, {28, 48}, {28, 53}, {29, 2}, {29, 33}, {29, 56}, {31, 31}, - {31, 33}, {31, 34}, {31, 49}, {32, 1}, {32, 2}, {32, 13}, {32, 15}, - {32, 28}, {32, 29}, {32, 32}, {32, 33}, {32, 34}, {32, 39}, {32, 40}, - {32, 48}, {32, 49}, {32, 53}, {32, 56}, {33, 1}, {33, 2}, {33, 32}, - {33, 33}, {33, 34}, {33, 49}, {33, 53}, {33, 56}, {34, 1}, {34, 2}, - {34, 32}, {34, 33}, {34, 34}, {34, 49}, {34, 53}, {34, 56}, {38, 34}, - {39, 57}, {40, 34}, {47, 15}, {47, 49}, {48, 2}, {48, 34}, {48, 56}, - {49, 1}, {49, 2}, {49, 28}, {49, 32}, {49, 33}, {49, 34}, {49, 39}, - {49, 49}, {49, 56}, {53, 1}, {53, 2}, {53, 28}, {53, 34}, {53, 53}, - {53, 57}, {55, 1}, {55, 28}, {55, 34}, {55, 53}, {55, 55}, {55, 56}, - {56, 1}, {56, 2}, {56, 7}, {56, 13}, {56, 32}, {56, 33}, {56, 34}, - {56, 49}, {56, 53}, {56, 56}, {56, 64}, {57, 34}, {57, 56}, {57, 57}, - {64, 1}, {64, 64}, {65, 1}, {65, 65}}}; - -// We have: 9 calculated features (the features here); 1 feature for each -// instruction opcode; and 1 feature for each manually-identified sequence. -// For the latter 2, we build a histogram: we count the number of -// occurrences of each instruction opcode or succession of instructions, -// respectively. -// Note that instruction opcodes start from 1. For convenience, we also have an -// always 0 feature for the '0' opcode, hence the extra 1. -const size_t IRToNativeSizeLearning::FunctionFeatures::FeatureCount = - ImportantInstructionSuccessions.size() + getMaxInstructionID() + 1 + - IRToNativeSizeLearning::NumNamedFeatures; - -size_t getSize(Function &F, TargetTransformInfo &TTI) { - size_t Ret = 0; - for (const auto &BB : F) - for (const auto &I : BB) - Ret += TTI.getInstructionCost( - &I, TargetTransformInfo::TargetCostKind::TCK_CodeSize) - .getValue(); - return Ret; -} - -size_t getSize(Function &F, FunctionAnalysisManager &FAM) { - auto &TTI = FAM.getResult(F); - return getSize(F, TTI); -} - -unsigned getMaxDominatorTreeDepth(const Function &F, - const DominatorTree &Tree) { - unsigned Ret = 0; - for (const auto &BB : F) - if (const auto *TN = Tree.getNode(&BB)) - Ret = std::max(Ret, TN->getLevel()); - return Ret; -} -} // namespace - -IRToNativeSizeLearning::FunctionFeatures -IRToNativeSizeLearning::getFunctionFeatures(Function &F, - FunctionAnalysisManager &FAM) { - assert(llvm::is_sorted(ImportantInstructionSuccessions) && - "expected function features are sorted"); - - auto &DomTree = FAM.getResult(F); - FunctionFeatures FF; - size_t InstrCount = getMaxInstructionID() + 1; - FF.InstructionHistogram.resize(InstrCount); - - FF.InstructionPairHistogram.resize(ImportantInstructionSuccessions.size()); - - int StartID = 0; - int LastID = StartID; - auto getPairIndex = [](size_t a, size_t b) { - auto I = llvm::find(ImportantInstructionSuccessions, std::make_pair(a, b)); - if (I == ImportantInstructionSuccessions.end()) - return -1; - return static_cast( - std::distance(ImportantInstructionSuccessions.begin(), I)); - }; - - // We don't want debug calls, because they'd just add noise. 
- for (const auto &BB : F) { - for (const auto &I : BB.instructionsWithoutDebug()) { - auto ID = I.getOpcode(); - - ++FF.InstructionHistogram[ID]; - int PairIndex = getPairIndex(LastID, ID); - if (PairIndex >= 0) - ++FF.InstructionPairHistogram[PairIndex]; - LastID = ID; - if (isa(I)) - ++FF[NamedFeatureIndex::Calls]; - } - } - - FF[NamedFeatureIndex::InitialSize] = getSize(F, FAM); - FF[NamedFeatureIndex::IsLocal] = F.hasLocalLinkage(); - FF[NamedFeatureIndex::IsLinkOnceODR] = F.hasLinkOnceODRLinkage(); - FF[NamedFeatureIndex::IsLinkOnce] = F.hasLinkOnceLinkage(); - FF[NamedFeatureIndex::Blocks] = F.size(); - auto &LI = FAM.getResult(F); - FF[NamedFeatureIndex::Loops] = std::distance(LI.begin(), LI.end()); - for (auto &L : LI) - FF[NamedFeatureIndex::MaxLoopDepth] = - std::max(FF[NamedFeatureIndex::MaxLoopDepth], - static_cast(L->getLoopDepth())); - FF[NamedFeatureIndex::MaxDomTreeLevel] = getMaxDominatorTreeDepth(F, DomTree); - return FF; -} - -void IRToNativeSizeLearning::FunctionFeatures::fillTensor(int32_t *Ptr) const { - std::copy(NamedFeatures.begin(), NamedFeatures.end(), Ptr); - Ptr += NamedFeatures.size(); - std::copy(InstructionHistogram.begin(), InstructionHistogram.end(), Ptr); - Ptr += InstructionHistogram.size(); - std::copy(InstructionPairHistogram.begin(), InstructionPairHistogram.end(), - Ptr); -} - -bool InlineSizeEstimatorAnalysis::isEvaluatorRequested() { - return !TFIR2NativeModelPath.empty(); -} - -InlineSizeEstimatorAnalysis::InlineSizeEstimatorAnalysis() { - if (!isEvaluatorRequested()) { - return; - } - std::vector InputSpecs{TensorSpec::createSpec( - "serving_default_input_1", - {1, static_cast( - IRToNativeSizeLearning::FunctionFeatures::FeatureCount)})}; - std::vector OutputSpecs{ - TensorSpec::createSpec("StatefulPartitionedCall", {1})}; - Evaluator = std::make_unique( - TFIR2NativeModelPath.getValue().c_str(), InputSpecs, OutputSpecs); - if (!Evaluator || !Evaluator->isValid()) { - Evaluator.reset(); - return; - } -} - -InlineSizeEstimatorAnalysis::Result -InlineSizeEstimatorAnalysis::run(const Function &F, - FunctionAnalysisManager &FAM) { - if (!Evaluator) - return std::nullopt; - auto Features = IRToNativeSizeLearning::getFunctionFeatures( - const_cast(F), FAM); - int32_t *V = Evaluator->getInput(0); - Features.fillTensor(V); - auto ER = Evaluator->evaluate(); - if (!ER) - return std::nullopt; - float Ret = *ER->getTensorValue(0); - if (Ret < 0.0) - Ret = 0.0; - return static_cast(Ret); -} - -InlineSizeEstimatorAnalysis::~InlineSizeEstimatorAnalysis() {} -InlineSizeEstimatorAnalysis::InlineSizeEstimatorAnalysis( - InlineSizeEstimatorAnalysis &&Other) - : Evaluator(std::move(Other.Evaluator)) {} - -#else -namespace llvm { -class TFModelEvaluator {}; -} // namespace llvm -InlineSizeEstimatorAnalysis::InlineSizeEstimatorAnalysis() = default; -InlineSizeEstimatorAnalysis ::InlineSizeEstimatorAnalysis( - InlineSizeEstimatorAnalysis &&) {} -InlineSizeEstimatorAnalysis::~InlineSizeEstimatorAnalysis() = default; -InlineSizeEstimatorAnalysis::Result -InlineSizeEstimatorAnalysis::run(const Function &F, - FunctionAnalysisManager &FAM) { - return std::nullopt; -} -bool InlineSizeEstimatorAnalysis::isEvaluatorRequested() { return false; } -#endif - -PreservedAnalyses -InlineSizeEstimatorAnalysisPrinterPass::run(Function &F, - FunctionAnalysisManager &AM) { - OS << "[InlineSizeEstimatorAnalysis] size estimate for " << F.getName() - << ": " << AM.getResult(F) << "\n"; - return PreservedAnalyses::all(); -} diff --git a/llvm/lib/Analysis/TFLiteUtils.cpp 
b/llvm/lib/Analysis/TFLiteUtils.cpp index 2762e22f28cef..fcef1c8aa7380 100644 --- a/llvm/lib/Analysis/TFLiteUtils.cpp +++ b/llvm/lib/Analysis/TFLiteUtils.cpp @@ -30,7 +30,6 @@ #include "tensorflow/lite/logger.h" #include -#include #include using namespace llvm; diff --git a/llvm/lib/Analysis/TargetLibraryInfo.cpp b/llvm/lib/Analysis/TargetLibraryInfo.cpp index 74f3a7d131c35..f97abc9a32707 100644 --- a/llvm/lib/Analysis/TargetLibraryInfo.cpp +++ b/llvm/lib/Analysis/TargetLibraryInfo.cpp @@ -15,33 +15,11 @@ #include "llvm/ADT/SmallString.h" #include "llvm/IR/Constants.h" #include "llvm/IR/Module.h" +#include "llvm/IR/SystemLibraries.h" #include "llvm/InitializePasses.h" -#include "llvm/Support/CommandLine.h" #include "llvm/TargetParser/Triple.h" using namespace llvm; -static cl::opt ClVectorLibrary( - "vector-library", cl::Hidden, cl::desc("Vector functions library"), - cl::init(TargetLibraryInfoImpl::NoLibrary), - cl::values(clEnumValN(TargetLibraryInfoImpl::NoLibrary, "none", - "No vector functions library"), - clEnumValN(TargetLibraryInfoImpl::Accelerate, "Accelerate", - "Accelerate framework"), - clEnumValN(TargetLibraryInfoImpl::DarwinLibSystemM, - "Darwin_libsystem_m", "Darwin libsystem_m"), - clEnumValN(TargetLibraryInfoImpl::LIBMVEC, "LIBMVEC", - "GLIBC Vector Math library"), - clEnumValN(TargetLibraryInfoImpl::MASSV, "MASSV", - "IBM MASS vector library"), - clEnumValN(TargetLibraryInfoImpl::SVML, "SVML", - "Intel SVML library"), - clEnumValN(TargetLibraryInfoImpl::SLEEFGNUABI, "sleefgnuabi", - "SIMD Library for Evaluating Elementary Functions"), - clEnumValN(TargetLibraryInfoImpl::ArmPL, "ArmPL", - "Arm Performance Libraries"), - clEnumValN(TargetLibraryInfoImpl::AMDLIBM, "AMDLIBM", - "AMD vector math library"))); - StringLiteral const TargetLibraryInfoImpl::StandardNames[LibFunc::NumLibFuncs] = { #define TLI_DEFINE_STRING @@ -1392,15 +1370,15 @@ const VecDesc VecFuncs_AMDLIBM[] = { void TargetLibraryInfoImpl::addVectorizableFunctionsFromVecLib( enum VectorLibrary VecLib, const llvm::Triple &TargetTriple) { switch (VecLib) { - case Accelerate: { + case VectorLibrary::Accelerate: { addVectorizableFunctions(VecFuncs_Accelerate); break; } - case DarwinLibSystemM: { + case VectorLibrary::DarwinLibSystemM: { addVectorizableFunctions(VecFuncs_DarwinLibSystemM); break; } - case LIBMVEC: { + case VectorLibrary::LIBMVEC: { switch (TargetTriple.getArch()) { default: break; @@ -1415,15 +1393,15 @@ void TargetLibraryInfoImpl::addVectorizableFunctionsFromVecLib( } break; } - case MASSV: { + case VectorLibrary::MASSV: { addVectorizableFunctions(VecFuncs_MASSV); break; } - case SVML: { + case VectorLibrary::SVML: { addVectorizableFunctions(VecFuncs_SVML); break; } - case SLEEFGNUABI: { + case VectorLibrary::SLEEFGNUABI: { switch (TargetTriple.getArch()) { default: break; @@ -1439,7 +1417,7 @@ void TargetLibraryInfoImpl::addVectorizableFunctionsFromVecLib( } break; } - case ArmPL: { + case VectorLibrary::ArmPL: { switch (TargetTriple.getArch()) { default: break; @@ -1450,11 +1428,11 @@ void TargetLibraryInfoImpl::addVectorizableFunctionsFromVecLib( } break; } - case AMDLIBM: { + case VectorLibrary::AMDLIBM: { addVectorizableFunctions(VecFuncs_AMDLIBM); break; } - case NoLibrary: + case VectorLibrary::NoLibrary: break; } } diff --git a/llvm/lib/Analysis/TrainingLogger.cpp b/llvm/lib/Analysis/TrainingLogger.cpp index 344ca92e18b51..39f79cffdcd88 100644 --- a/llvm/lib/Analysis/TrainingLogger.cpp +++ b/llvm/lib/Analysis/TrainingLogger.cpp @@ -23,7 +23,6 @@ #include "llvm/Support/raw_ostream.h" #include 
-#include

 using namespace llvm;

diff --git a/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.cpp b/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.cpp
index 7fbadad34058f..7588a47b2b975 100644
--- a/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.cpp
+++ b/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.cpp
@@ -1608,18 +1608,8 @@ void DwarfDebug::ensureAbstractEntityIsCreatedIfScoped(DwarfCompileUnit &CU,
 }

 static const DILocalScope *getRetainedNodeScope(const MDNode *N) {
-  const DIScope *S;
-  if (const auto *LV = dyn_cast<DILocalVariable>(N))
-    S = LV->getScope();
-  else if (const auto *L = dyn_cast<DILabel>(N))
-    S = L->getScope();
-  else if (const auto *IE = dyn_cast<DIImportedEntity>(N))
-    S = IE->getScope();
-  else
-    llvm_unreachable("Unexpected retained node!");
-
-  // Ensure the scope is not a DILexicalBlockFile.
-  return cast<DILocalScope>(S)->getNonLexicalBlockFileScope();
+  return DISubprogram::getRetainedNodeScope(N)->getNonLexicalBlockFileScope();
 }

 // Collect variable information from side table maintained by MF.
diff --git a/llvm/lib/CodeGen/CodeGenPrepare.cpp b/llvm/lib/CodeGen/CodeGenPrepare.cpp
index 74ccc34bd5a9d..7068eb440e31b 100644
--- a/llvm/lib/CodeGen/CodeGenPrepare.cpp
+++ b/llvm/lib/CodeGen/CodeGenPrepare.cpp
@@ -1840,7 +1840,8 @@
 /// lose; some adjustment may be wanted there.
 ///
 /// Return true if any changes are made.
-static bool sinkCmpExpression(CmpInst *Cmp, const TargetLowering &TLI) {
+static bool sinkCmpExpression(CmpInst *Cmp, const TargetLowering &TLI,
+                              const DataLayout &DL) {
   if (TLI.hasMultipleConditionRegisters(EVT::getEVT(Cmp->getType())))
     return false;

@@ -1848,6 +1849,18 @@
   if (TLI.useSoftFloat() && isa<FCmpInst>(Cmp))
     return false;

+  bool UsedInPhiOrCurrentBlock = any_of(Cmp->users(), [Cmp](User *U) {
+    return isa<PHINode>(U) ||
+           cast<Instruction>(U)->getParent() == Cmp->getParent();
+  });
+
+  // Avoid sinking larger-than-legal integer comparisons unless the compare is
+  // only used in another BB.
+  if (UsedInPhiOrCurrentBlock && Cmp->getOperand(0)->getType()->isIntegerTy() &&
+      Cmp->getOperand(0)->getType()->getScalarSizeInBits() >
+          DL.getLargestLegalIntTypeSizeInBits())
+    return false;
+
   // Only insert a cmp in each block once.
   DenseMap<BasicBlock *, CmpInst *> InsertedCmps;

@@ -2225,7 +2238,7 @@ bool CodeGenPrepare::optimizeURem(Instruction *Rem) {
 }

 bool CodeGenPrepare::optimizeCmp(CmpInst *Cmp, ModifyDT &ModifiedDT) {
-  if (sinkCmpExpression(Cmp, *TLI))
+  if (sinkCmpExpression(Cmp, *TLI, *DL))
     return true;

   if (combineToUAddWithOverflow(Cmp, ModifiedDT))
diff --git a/llvm/lib/CodeGen/MLRegAllocEvictAdvisor.cpp b/llvm/lib/CodeGen/MLRegAllocEvictAdvisor.cpp
index a72c2c41acc46..32b6c46303828 100644
--- a/llvm/lib/CodeGen/MLRegAllocEvictAdvisor.cpp
+++ b/llvm/lib/CodeGen/MLRegAllocEvictAdvisor.cpp
@@ -83,13 +83,6 @@ static cl::opt<std::string> ModelUnderTraining(
     "regalloc-model", cl::Hidden,
     cl::desc("The model being trained for register allocation eviction"));

-static cl::opt<bool> EnableDevelopmentFeatures(
-    "regalloc-enable-development-features", cl::Hidden,
-    cl::desc("Whether or not to enable features under development for the ML "
-             "regalloc advisor"));
-
-#else
-static const bool EnableDevelopmentFeatures = false;
 #endif // #ifdef LLVM_HAVE_TFLITE

 /// The score injection pass.
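
The new guard in the CodeGenPrepare hunk above keys off DataLayout's largest legal integer width. A tiny illustration of the quantity being compared against, using a datalayout fragment typical of a 64-bit target (the string is my assumption, not taken from the patch):

```c++
#include "llvm/IR/DataLayout.h"
#include "llvm/Support/raw_ostream.h"

int main() {
  // "n8:16:32:64" declares the target's native integer widths; an i128
  // compare exceeds the largest of them and is no longer sunk when it also
  // has a PHI user or a user in its own block.
  llvm::DataLayout DL("n8:16:32:64");
  llvm::outs() << DL.getLargestLegalIntTypeSizeInBits() << "\n"; // prints 64
}
```
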
@@ -212,23 +205,6 @@ static const std::vector PerLiveRangeShape{1, NumberOfInterferences}; "lowest stage of an interval in this LR") \ M(float, progress, {1}, "ratio of current queue size to initial size") -#ifdef LLVM_HAVE_TFLITE -#define RA_EVICT_FIRST_DEVELOPMENT_FEATURE(M) \ - M(int64_t, instructions, InstructionsShape, \ - "Opcodes of the instructions covered by the eviction problem") - -#define RA_EVICT_REST_DEVELOPMENT_FEATURES(M) \ - M(int64_t, instructions_mapping, InstructionsMappingShape, \ - "A binary matrix mapping LRs to instruction opcodes") \ - M(float, mbb_frequencies, MBBFrequencyShape, \ - "A vector of machine basic block frequencies") \ - M(int64_t, mbb_mapping, InstructionsShape, \ - "A vector of indices mapping instructions to MBBs") -#else -#define RA_EVICT_FIRST_DEVELOPMENT_FEATURE(M) -#define RA_EVICT_REST_DEVELOPMENT_FEATURES(M) -#endif - // The model learns to pick one of the mask == 1 interferences. This is the // name of the output tensor. The contract with the model is that the output // will be guaranteed to be to a mask == 1 position. Using a macro here to @@ -242,12 +218,6 @@ enum FeatureIDs { #define _FEATURE_IDX_SIMPLE(_, name, __, ___) name #define _FEATURE_IDX(A, B, C, D) _FEATURE_IDX_SIMPLE(A, B, C, D), RA_EVICT_FEATURES_LIST(_FEATURE_IDX) FeatureCount, -#ifdef LLVM_HAVE_TFLITE - RA_EVICT_FIRST_DEVELOPMENT_FEATURE(_FEATURE_IDX_SIMPLE) = FeatureCount, -#else - RA_EVICT_FIRST_DEVELOPMENT_FEATURE(_FEATURE_IDX) -#endif // #ifdef LLVM_HAVE_TFLITE - RA_EVICT_REST_DEVELOPMENT_FEATURES(_FEATURE_IDX) FeaturesWithDevelopmentCount #undef _FEATURE_IDX #undef _FEATURE_IDX_SIMPLE }; @@ -268,11 +238,7 @@ void resetInputs(MLModelRunner &Runner) { std::memset(Runner.getTensorUntyped(FeatureIDs::NAME), 0, \ getTotalSize(SHAPE)); RA_EVICT_FEATURES_LIST(_RESET) - if (EnableDevelopmentFeatures) { - RA_EVICT_FIRST_DEVELOPMENT_FEATURE(_RESET) - RA_EVICT_REST_DEVELOPMENT_FEATURES(_RESET) #undef _RESET - } } // Per-live interval components that get aggregated into the feature values @@ -398,13 +364,7 @@ class ReleaseModeEvictionAdvisorProvider final public: ReleaseModeEvictionAdvisorProvider(LLVMContext &Ctx) : RegAllocEvictionAdvisorProvider(AdvisorMode::Release, Ctx) { - if (EnableDevelopmentFeatures) { - InputFeatures = {RA_EVICT_FEATURES_LIST( - _DECL_FEATURES) RA_EVICT_FIRST_DEVELOPMENT_FEATURE(_DECL_FEATURES) - RA_EVICT_REST_DEVELOPMENT_FEATURES(_DECL_FEATURES)}; - } else { - InputFeatures = {RA_EVICT_FEATURES_LIST(_DECL_FEATURES)}; - } + InputFeatures = {RA_EVICT_FEATURES_LIST(_DECL_FEATURES)}; } // support for isa<> and dyn_cast. 
static bool classof(const RegAllocEvictionAdvisorProvider *R) { @@ -500,25 +460,12 @@ class DevelopmentModeEvictionAdvisorProvider final public: DevelopmentModeEvictionAdvisorProvider(LLVMContext &Ctx) : RegAllocEvictionAdvisorProvider(AdvisorMode::Development, Ctx) { - if (EnableDevelopmentFeatures) { - InputFeatures = {RA_EVICT_FEATURES_LIST( - _DECL_FEATURES) RA_EVICT_FIRST_DEVELOPMENT_FEATURE(_DECL_FEATURES) - RA_EVICT_REST_DEVELOPMENT_FEATURES(_DECL_FEATURES)}; - TrainingInputFeatures = { - RA_EVICT_FEATURES_LIST(_DECL_TRAIN_FEATURES) - RA_EVICT_FIRST_DEVELOPMENT_FEATURE(_DECL_TRAIN_FEATURES) - RA_EVICT_REST_DEVELOPMENT_FEATURES(_DECL_TRAIN_FEATURES) - TensorSpec::createSpec("action_discount", {1}), - TensorSpec::createSpec("action_step_type", {1}), - TensorSpec::createSpec("action_reward", {1})}; - } else { - InputFeatures = {RA_EVICT_FEATURES_LIST(_DECL_FEATURES)}; - TrainingInputFeatures = { - RA_EVICT_FEATURES_LIST(_DECL_TRAIN_FEATURES) - TensorSpec::createSpec("action_discount", {1}), - TensorSpec::createSpec("action_step_type", {1}), - TensorSpec::createSpec("action_reward", {1})}; - } + InputFeatures = {RA_EVICT_FEATURES_LIST(_DECL_FEATURES)}; + TrainingInputFeatures = { + RA_EVICT_FEATURES_LIST(_DECL_TRAIN_FEATURES) + TensorSpec::createSpec("action_discount", {1}), + TensorSpec::createSpec("action_step_type", {1}), + TensorSpec::createSpec("action_reward", {1})}; if (ModelUnderTraining.empty() && TrainingLog.empty()) { Ctx.emitError("Regalloc development mode should be requested with at " "least logging enabled and/or a training model"); @@ -814,34 +761,6 @@ MCRegister MLEvictAdvisor::tryFindEvictionCandidate( /*NumUrgent*/ 0.0, LRPosInfo); assert(InitialQSize > 0.0 && "We couldn't have gotten here if we had " "nothing to allocate initially."); -#ifdef LLVM_HAVE_TFLITE - if (EnableDevelopmentFeatures) { - extractInstructionFeatures( - LRPosInfo, Runner, - [this](SlotIndex InputIndex) -> int { - auto *CurrentMachineInstruction = - LIS->getInstructionFromIndex(InputIndex); - if (!CurrentMachineInstruction) { - return -1; - } - return CurrentMachineInstruction->getOpcode(); - }, - [this](SlotIndex InputIndex) -> float { - auto *CurrentMachineInstruction = - LIS->getInstructionFromIndex(InputIndex); - return MBFI.getBlockFreqRelativeToEntryBlock( - CurrentMachineInstruction->getParent()); - }, - [this](SlotIndex InputIndex) -> MachineBasicBlock * { - auto *CurrentMachineInstruction = - LIS->getInstructionFromIndex(InputIndex); - return CurrentMachineInstruction->getParent(); - }, - FeatureIDs::instructions, FeatureIDs::instructions_mapping, - FeatureIDs::mbb_frequencies, FeatureIDs::mbb_mapping, - LIS->getSlotIndexes()->getLastIndex()); - } -#endif // #ifdef LLVM_HAVE_TFLITE // Normalize the features. for (auto &V : Largest) V = V ? V : 1.0; @@ -987,13 +906,6 @@ void MLEvictAdvisor::extractFeatures( HintWeights += LIFC.HintWeights; NumRematerializable += LIFC.IsRemat; - - if (EnableDevelopmentFeatures) { - for (auto CurrentSegment : LI) { - LRPosInfo.push_back( - LRStartEndInfo{CurrentSegment.start, CurrentSegment.end, Pos}); - } - } } size_t Size = 0; if (!Intervals.empty()) { @@ -1209,9 +1121,7 @@ int64_t DevelopmentModeEvictAdvisor::tryFindEvictionCandidatePosition( Log->startObservation(); size_t CurrentFeature = 0; - size_t FeatureCount = EnableDevelopmentFeatures - ? 
FeatureIDs::FeaturesWithDevelopmentCount - : FeatureIDs::FeatureCount; + size_t FeatureCount = FeatureIDs::FeatureCount; for (; CurrentFeature < FeatureCount; ++CurrentFeature) { Log->logTensorValue(CurrentFeature, reinterpret_cast<const char *>( diff --git a/llvm/lib/CodeGen/RegisterCoalescer.cpp b/llvm/lib/CodeGen/RegisterCoalescer.cpp index 99f76936a180f..f93a7f22c3961 100644 --- a/llvm/lib/CodeGen/RegisterCoalescer.cpp +++ b/llvm/lib/CodeGen/RegisterCoalescer.cpp @@ -79,9 +79,9 @@ static cl::opt<bool> EnableJoining("join-liveintervals", cl::desc("Coalesce copies (default=true)"), cl::init(true), cl::Hidden); -static cl::opt<bool> UseTerminalRule("terminal-rule", - cl::desc("Apply the terminal rule"), - cl::init(false), cl::Hidden); +static cl::opt<cl::boolOrDefault> + EnableTerminalRule("terminal-rule", cl::desc("Apply the terminal rule"), + cl::init(cl::BOU_UNSET), cl::Hidden); /// Temporary flag to test critical edge unsplitting. static cl::opt<cl::boolOrDefault> EnableJoinSplits( @@ -134,6 +134,7 @@ class RegisterCoalescer : private LiveRangeEdit::Delegate { SlotIndexes *SI = nullptr; const MachineLoopInfo *Loops = nullptr; RegisterClassInfo RegClassInfo; + bool UseTerminalRule = false; /// Position and VReg of a PHI instruction during coalescing. struct PHIValPos { @@ -4320,6 +4321,11 @@ bool RegisterCoalescer::run(MachineFunction &fn) { else JoinGlobalCopies = (EnableGlobalCopies == cl::BOU_TRUE); + if (EnableTerminalRule == cl::BOU_UNSET) + UseTerminalRule = STI.enableTerminalRule(); + else + UseTerminalRule = EnableTerminalRule == cl::BOU_TRUE; + // If there are PHIs tracked by debug-info, they will need updating during // coalescing. Build an index of those PHIs to ease updating. SlotIndexes *Slots = LIS->getSlotIndexes(); diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp index 9d42d02b4cf3f..768d03522d7c8 100644 --- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp @@ -18865,6 +18865,26 @@ SDValue DAGCombiner::visitFCOPYSIGN(SDNode *N) { if (SimplifyDemandedBits(SDValue(N, 0))) return SDValue(N, 0); + if (VT != N1.getValueType()) + return SDValue(); + + // If this is equivalent to a disjoint or, replace it with one. This can + // happen if the sign operand is a sign mask (i.e., x << sign_bit_position). + if (DAG.SignBitIsZeroFP(N0) && + DAG.computeKnownBits(N1).Zero.isMaxSignedValue()) { + // TODO: Just directly match the shift pattern. computeKnownBits is heavy + // for such a narrowly targeted case. + EVT IntVT = VT.changeTypeToInteger(); + // TODO: It appears to be profitable in some situations to unconditionally + // emit a fabs(n0) to perform this combine. + SDValue CastSrc0 = DAG.getNode(ISD::BITCAST, DL, IntVT, N0); + SDValue CastSrc1 = DAG.getNode(ISD::BITCAST, DL, IntVT, N1); + + SDValue SignOr = DAG.getNode(ISD::OR, DL, IntVT, CastSrc0, CastSrc1, + SDNodeFlags::Disjoint); + return DAG.getNode(ISD::BITCAST, DL, VT, SignOr); + } + return SDValue(); } diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp index 316aacdf6978e..a0baf821698a8 100644 --- a/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp @@ -4842,9 +4842,15 @@ void SelectionDAGLegalize::ConvertNodeToLibcall(SDNode *Node) { RTLIB::Libcall LC = Node->getOpcode() == ISD::FSINCOS ? 
RTLIB::getSINCOS(VT) : RTLIB::getSINCOSPI(VT); - bool Expanded = DAG.expandMultipleResultFPLibCall(LC, Node, Results); - if (!Expanded) - llvm_unreachable("Expected scalar FSINCOS[PI] to expand to libcall!"); + bool Expanded = DAG.expandMultipleResultFPLibCall(LC, Node, Results, VT); + if (!Expanded) { + DAG.getContext()->emitError(Twine("no libcall available for ") + + Node->getOperationName(&DAG)); + SDValue Poison = DAG.getPOISON(VT); + Results.push_back(Poison); + Results.push_back(Poison); + } + break; } case ISD::FLOG: @@ -4934,7 +4940,7 @@ void SelectionDAGLegalize::ConvertNodeToLibcall(SDNode *Node) { EVT VT = Node->getValueType(0); RTLIB::Libcall LC = Node->getOpcode() == ISD::FMODF ? RTLIB::getMODF(VT) : RTLIB::getFREXP(VT); - bool Expanded = DAG.expandMultipleResultFPLibCall(LC, Node, Results, + bool Expanded = DAG.expandMultipleResultFPLibCall(LC, Node, Results, VT, /*CallRetResNo=*/0); if (!Expanded) llvm_unreachable("Expected scalar FFREXP/FMODF to expand to libcall!"); diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp index 58983cb57d7f6..29c4dac12a81a 100644 --- a/llvm/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp @@ -1726,7 +1726,8 @@ void DAGTypeLegalizer::ExpandFloatRes_UnaryWithTwoFPResults( SDNode *N, RTLIB::Libcall LC, std::optional CallRetResNo) { assert(!N->isStrictFPOpcode() && "strictfp not implemented"); SmallVector Results; - DAG.expandMultipleResultFPLibCall(LC, N, Results, CallRetResNo); + DAG.expandMultipleResultFPLibCall(LC, N, Results, N->getValueType(0), + CallRetResNo); for (auto [ResNo, Res] : enumerate(Results)) { SDValue Lo, Hi; GetPairElements(Res, Lo, Hi); diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp index 94751be5b7986..f5a54497c8a98 100644 --- a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp @@ -1268,20 +1268,30 @@ void VectorLegalizer::Expand(SDNode *Node, SmallVectorImpl &Results) { return; break; - case ISD::FSINCOS: + case ISD::FSINCOSPI: { + EVT VT = Node->getValueType(0); + RTLIB::Libcall LC = RTLIB::getSINCOSPI(VT); + if (LC != RTLIB::UNKNOWN_LIBCALL && + DAG.expandMultipleResultFPLibCall(LC, Node, Results, VT)) + return; + + // TODO: Try to see if there's a narrower call available to use before + // scalarizing. + break; + } + case ISD::FSINCOS: { + // FIXME: Try to directly match vector case like fsincospi EVT VT = Node->getValueType(0).getVectorElementType(); - RTLIB::Libcall LC = Node->getOpcode() == ISD::FSINCOS - ? 
RTLIB::getSINCOS(VT) - : RTLIB::getSINCOSPI(VT); - if (DAG.expandMultipleResultFPLibCall(LC, Node, Results)) + RTLIB::Libcall LC = RTLIB::getSINCOS(VT); + if (DAG.expandMultipleResultFPLibCall(LC, Node, Results, VT)) return; break; } case ISD::FMODF: { - RTLIB::Libcall LC = - RTLIB::getMODF(Node->getValueType(0).getVectorElementType()); - if (DAG.expandMultipleResultFPLibCall(LC, Node, Results, + EVT VT = Node->getValueType(0).getVectorElementType(); + RTLIB::Libcall LC = RTLIB::getMODF(VT); + if (DAG.expandMultipleResultFPLibCall(LC, Node, Results, VT, /*CallRetResNo=*/0)) return; break; diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp index ca4ec50cfee8a..9e2815aa5aaa7 100644 --- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp @@ -2514,18 +2514,20 @@ static bool canFoldStoreIntoLibCallOutputPointers(StoreSDNode *StoreNode, bool SelectionDAG::expandMultipleResultFPLibCall( RTLIB::Libcall LC, SDNode *Node, SmallVectorImpl &Results, - std::optional CallRetResNo) { - LLVMContext &Ctx = *getContext(); - EVT VT = Node->getValueType(0); - unsigned NumResults = Node->getNumValues(); - + EVT CallVT, std::optional CallRetResNo) { if (LC == RTLIB::UNKNOWN_LIBCALL) return false; - const char *LCName = TLI->getLibcallName(LC); - if (!LCName) + EVT VT = Node->getValueType(0); + + RTLIB::LibcallImpl Impl = TLI->getLibcallImpl(LC); + if (Impl == RTLIB::Unsupported) return false; + StringRef LCName = TLI->getLibcallImplName(Impl); + + // FIXME: This should not use TargetLibraryInfo. There should be + // RTLIB::Libcall entries for each used vector type, and directly matched. auto getVecDesc = [&]() -> VecDesc const * { for (bool Masked : {false, true}) { if (VecDesc const *VD = getLibInfo().getVectorMappingInfo( @@ -2538,9 +2540,34 @@ bool SelectionDAG::expandMultipleResultFPLibCall( // For vector types, we must find a vector mapping for the libcall. VecDesc const *VD = nullptr; - if (VT.isVector() && !(VD = getVecDesc())) + if (VT.isVector() && !CallVT.isVector() && !(VD = getVecDesc())) return false; + bool IsMasked = (VD && VD->isMasked()) || + RTLIB::RuntimeLibcallsInfo::hasVectorMaskArgument(Impl); + + // This wrapper function exists because getVectorMappingInfo works in terms of + // function names instead of RTLIB enums. + + // FIXME: If we used a vector mapping, this assumes the calling convention of + // the vector function is the same as the scalar. + + StringRef Name = VD ? VD->getVectorFnName() : LCName; + + return expandMultipleResultFPLibCall(Name, + TLI->getLibcallImplCallingConv(Impl), + Node, Results, CallRetResNo, IsMasked); +} + +// FIXME: This belongs in TargetLowering +bool SelectionDAG::expandMultipleResultFPLibCall( + StringRef Name, CallingConv::ID CC, SDNode *Node, + SmallVectorImpl &Results, std::optional CallRetResNo, + bool IsMasked) { + LLVMContext &Ctx = *getContext(); + EVT VT = Node->getValueType(0); + unsigned NumResults = Node->getNumValues(); + // Find users of the node that store the results (and share input chains). The // destination pointers can be used instead of creating stack allocations. SDValue StoresInChain; @@ -2598,7 +2625,7 @@ bool SelectionDAG::expandMultipleResultFPLibCall( SDLoc DL(Node); // Pass the vector mask (if required). 
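For reference, the call shape expandMultipleResultFPLibCall materializes: every result comes back through a pointer argument, and masked vector variants append a lane predicate. A sketch of the two signatures involved; sincosf is the real libm routine, while the vector types and the masked function name are illustrative stand-ins for the SLEEF/ArmPL entry points registered later in this patch:

// Scalar form: void return, results through out-pointers. The expansion
// either reuses the pointers of stores that consume the results or creates
// stack temporaries and loads them back.
extern "C" void sincosf(float x, float *sin_out, float *cos_out);

// Masked vector form (illustrative signature): same out-pointer convention,
// plus a trailing all-true mask when every lane is wanted, matching the
// IsMasked path below.
struct float32x4 { float lane[4]; };
struct mask32x4 { bool lane[4]; };
extern "C" void vec_sincosf_masked(float32x4 x, float32x4 *sin_out,
                                   float32x4 *cos_out, mask32x4 pred);

void demo(float x, float *s, float *c) {
  sincosf(x, s, c); // one call, two stored results
}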
- if (VD && VD->isMasked()) { + if (IsMasked) { EVT MaskVT = TLI->getSetCCResultType(getDataLayout(), Ctx, VT); SDValue Mask = getBoolConstant(true, DL, MaskVT, VT); Args.emplace_back(Mask, MaskVT.getTypeForEVT(Ctx)); @@ -2608,11 +2635,11 @@ bool SelectionDAG::expandMultipleResultFPLibCall( ? Node->getValueType(*CallRetResNo).getTypeForEVT(Ctx) : Type::getVoidTy(Ctx); SDValue InChain = StoresInChain ? StoresInChain : getEntryNode(); - SDValue Callee = getExternalSymbol(VD ? VD->getVectorFnName().data() : LCName, - TLI->getPointerTy(getDataLayout())); + SDValue Callee = + getExternalSymbol(Name.data(), TLI->getPointerTy(getDataLayout())); TargetLowering::CallLoweringInfo CLI(*this); - CLI.setDebugLoc(DL).setChain(InChain).setLibCallee( - TLI->getLibcallCallingConv(LC), RetType, Callee, std::move(Args)); + CLI.setDebugLoc(DL).setChain(InChain).setLibCallee(CC, RetType, Callee, + std::move(Args)); auto [Call, CallChain] = TLI->LowerCallTo(CLI); @@ -2920,6 +2947,34 @@ bool SelectionDAG::SignBitIsZero(SDValue Op, unsigned Depth) const { return MaskedValueIsZero(Op, APInt::getSignMask(BitWidth), Depth); } +bool SelectionDAG::SignBitIsZeroFP(SDValue Op, unsigned Depth) const { + if (Depth >= MaxRecursionDepth) + return false; // Limit search depth. + + unsigned Opc = Op.getOpcode(); + switch (Opc) { + case ISD::FABS: + return true; + case ISD::AssertNoFPClass: { + FPClassTest NoFPClass = + static_cast(Op.getConstantOperandVal(1)); + + const FPClassTest TestMask = fcNan | fcNegative; + return (NoFPClass & TestMask) == TestMask; + } + case ISD::ARITH_FENCE: + return SignBitIsZeroFP(Op, Depth + 1); + case ISD::FEXP: + case ISD::FEXP2: + case ISD::FEXP10: + return Op->getFlags().hasNoNaNs(); + default: + return false; + } + + llvm_unreachable("covered opcode switch"); +} + /// MaskedValueIsZero - Return true if 'V & Mask' is known to be zero. We use /// this predicate to simplify operations downstream. Mask is known to be zero /// for bits that V cannot have. @@ -4121,6 +4176,25 @@ KnownBits SelectionDAG::computeKnownBits(SDValue Op, const APInt &DemandedElts, Known.One.clearLowBits(LogOfAlign); break; } + case ISD::AssertNoFPClass: { + Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); + + FPClassTest NoFPClass = + static_cast(Op.getConstantOperandVal(1)); + const FPClassTest NegativeTestMask = fcNan | fcNegative; + if ((NoFPClass & NegativeTestMask) == NegativeTestMask) { + // Cannot be negative. + Known.makeNonNegative(); + } + + const FPClassTest PositiveTestMask = fcNan | fcPositive; + if ((NoFPClass & PositiveTestMask) == PositiveTestMask) { + // Cannot be positive. + Known.makeNegative(); + } + + break; + } case ISD::FGETSIGN: // All bits are zero except the low bit. 
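Stepping back to the new SignBitIsZeroFP helper above: it exists to feed the visitFCOPYSIGN combine earlier in this patch. When the magnitude operand provably has a clear sign bit and the sign operand can have no bit set other than the sign bit, copysign reduces to a disjoint integer OR. A small host-side demonstration of the bit identity (C++20 for std::bit_cast; the concrete values are just an example):

#include <bit>
#include <cassert>
#include <cmath>
#include <cstdint>

int main() {
  float mag = 2.5f;                 // sign bit known zero, e.g. fabs or exp
  uint32_t sign_mask = 0x80000000u; // only the sign bit may be set, matching
                                    // computeKnownBits(N1).Zero.isMaxSignedValue()
  uint32_t mag_bits = std::bit_cast<uint32_t>(mag);

  // No bit can be set in both operands, so the OR is disjoint and acts as a
  // pure sign transplant.
  assert((mag_bits & sign_mask) == 0);
  float combined = std::bit_cast<float>(mag_bits | sign_mask);
  assert(combined == std::copysign(mag, -1.0f)); // -2.5f
  return 0;
}

The SDNodeFlags::Disjoint annotation on the emitted OR records exactly this non-overlap, which later combines can exploit.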
Known.Zero.setBitsFrom(1); diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp index c9a02e49f9f99..602b589633052 100644 --- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp @@ -4638,6 +4638,12 @@ static std::optional getRange(const Instruction &I) { return std::nullopt; } +static FPClassTest getNoFPClass(const Instruction &I) { + if (const auto *CB = dyn_cast(&I)) + return CB->getRetNoFPClass(); + return fcNone; +} + void SelectionDAGBuilder::visitLoad(const LoadInst &I) { if (I.isAtomic()) return visitAtomicLoad(I); @@ -9125,6 +9131,7 @@ void SelectionDAGBuilder::LowerCallTo(const CallBase &CB, SDValue Callee, if (Result.first.getNode()) { Result.first = lowerRangeToAssertZExt(DAG, CB, Result.first); + Result.first = lowerNoFPClassToAssertNoFPClass(DAG, CB, Result.first); setValue(&CB, Result.first); } @@ -10711,6 +10718,16 @@ SDValue SelectionDAGBuilder::lowerRangeToAssertZExt(SelectionDAG &DAG, return DAG.getMergeValues(Ops, SL); } +SDValue SelectionDAGBuilder::lowerNoFPClassToAssertNoFPClass( + SelectionDAG &DAG, const Instruction &I, SDValue Op) { + FPClassTest Classes = getNoFPClass(I); + if (Classes == fcNone) + return Op; + + return DAG.getNode(ISD::AssertNoFPClass, SDLoc(Op), Op.getValueType(), Op, + DAG.getTargetConstant(Classes, SDLoc(), MVT::i32)); +} + /// Populate a CallLowerinInfo (into \p CLI) based on the properties of /// the call being lowered. /// diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h index ed63bee58c957..13e2daa783147 100644 --- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h +++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h @@ -429,6 +429,10 @@ class SelectionDAGBuilder { SDValue lowerRangeToAssertZExt(SelectionDAG &DAG, const Instruction &I, SDValue Op); + // Lower nofpclass attributes to AssertNoFPClass + SDValue lowerNoFPClassToAssertNoFPClass(SelectionDAG &DAG, + const Instruction &I, SDValue Op); + void populateCallLoweringInfo(TargetLowering::CallLoweringInfo &CLI, const CallBase *Call, unsigned ArgIdx, unsigned NumArgs, SDValue Callee, diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp index 8bc5d2f3e421f..e78dfb12505c7 100644 --- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp @@ -2448,7 +2448,7 @@ bool SelectionDAGISel::IsLegalToFold(SDValue N, SDNode *U, SDNode *Root, // a cycle in the scheduling graph. // If the node has glue, walk down the graph to the "lowest" node in the - // glueged set. + // glued set. 
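Returning to the AssertNoFPClass additions: a call's nofpclass return attribute only yields sign information when entire class groups are excluded, since a NaN's sign bit is unconstrained. A reduced model of the mask test used by SignBitIsZeroFP and computeKnownBits above (the bit values mirror LLVM's FPClassTest but are restated here purely for illustration):

#include <cstdio>

// Reduced stand-ins for LLVM's FPClassTest bits.
enum FPClassTest : unsigned {
  fcNan = 0x3,        // signaling + quiet NaN
  fcNegative = 0x3c,  // -inf, -normal, -subnormal, -zero
  fcPositive = 0x3c0, // +zero, +subnormal, +normal, +inf
};

// The sign bit is known zero only if the value can be neither NaN nor any
// negative class.
bool signBitKnownZero(unsigned NoFPClass) {
  const unsigned TestMask = fcNan | fcNegative;
  return (NoFPClass & TestMask) == TestMask;
}

int main() {
  std::printf("%d\n", signBitKnownZero(fcNan | fcNegative)); // 1: provably +
  std::printf("%d\n", signBitKnownZero(fcNegative));         // 0: NaN possible
  return 0;
}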
EVT VT = Root->getValueType(Root->getNumValues()-1); while (VT == MVT::Glue) { SDNode *GU = Root->getGluedUser(); diff --git a/llvm/lib/CodeGen/TargetLoweringBase.cpp b/llvm/lib/CodeGen/TargetLoweringBase.cpp index 1cc591c17f9c3..814b4b57a0b9b 100644 --- a/llvm/lib/CodeGen/TargetLoweringBase.cpp +++ b/llvm/lib/CodeGen/TargetLoweringBase.cpp @@ -430,6 +430,24 @@ RTLIB::Libcall RTLIB::getSINCOS(EVT RetVT) { } RTLIB::Libcall RTLIB::getSINCOSPI(EVT RetVT) { + // TODO: Tablegen should generate this function + if (RetVT.isVector()) { + if (!RetVT.isSimple()) + return RTLIB::UNKNOWN_LIBCALL; + switch (RetVT.getSimpleVT().SimpleTy) { + case MVT::v4f32: + return RTLIB::SINCOSPI_V4F32; + case MVT::v2f64: + return RTLIB::SINCOSPI_V2F64; + case MVT::nxv4f32: + return RTLIB::SINCOSPI_NXV4F32; + case MVT::nxv2f64: + return RTLIB::SINCOSPI_NXV2F64; + default: + return RTLIB::UNKNOWN_LIBCALL; + } + } + return getFPLibCall(RetVT, SINCOSPI_F32, SINCOSPI_F64, SINCOSPI_F80, SINCOSPI_F128, SINCOSPI_PPCF128); } diff --git a/llvm/lib/CodeGen/TargetSchedule.cpp b/llvm/lib/CodeGen/TargetSchedule.cpp index 7ae9e0e37bbab..cd951a1a4f53e 100644 --- a/llvm/lib/CodeGen/TargetSchedule.cpp +++ b/llvm/lib/CodeGen/TargetSchedule.cpp @@ -25,7 +25,6 @@ #include "llvm/Support/raw_ostream.h" #include #include -#include using namespace llvm; diff --git a/llvm/lib/DebugInfo/DWARF/DWARFUnwindTablePrinter.cpp b/llvm/lib/DebugInfo/DWARF/DWARFUnwindTablePrinter.cpp index a88f4a554bcf0..a4bdd1f0a867c 100644 --- a/llvm/lib/DebugInfo/DWARF/DWARFUnwindTablePrinter.cpp +++ b/llvm/lib/DebugInfo/DWARF/DWARFUnwindTablePrinter.cpp @@ -15,7 +15,6 @@ #include #include #include -#include using namespace llvm; using namespace dwarf; diff --git a/llvm/lib/Frontend/Driver/CodeGenOptions.cpp b/llvm/lib/Frontend/Driver/CodeGenOptions.cpp index b546e816419e3..4e1602703fb35 100644 --- a/llvm/lib/Frontend/Driver/CodeGenOptions.cpp +++ b/llvm/lib/Frontend/Driver/CodeGenOptions.cpp @@ -8,6 +8,7 @@ #include "llvm/Frontend/Driver/CodeGenOptions.h" #include "llvm/Analysis/TargetLibraryInfo.h" +#include "llvm/IR/SystemLibraries.h" #include "llvm/ProfileData/InstrProfCorrelator.h" #include "llvm/TargetParser/Triple.h" @@ -25,35 +26,35 @@ TargetLibraryInfoImpl *createTLII(const llvm::Triple &TargetTriple, using VectorLibrary = llvm::driver::VectorLibrary; switch (Veclib) { case VectorLibrary::Accelerate: - TLII->addVectorizableFunctionsFromVecLib(TargetLibraryInfoImpl::Accelerate, + TLII->addVectorizableFunctionsFromVecLib(llvm::VectorLibrary::Accelerate, TargetTriple); break; case VectorLibrary::LIBMVEC: - TLII->addVectorizableFunctionsFromVecLib(TargetLibraryInfoImpl::LIBMVEC, + TLII->addVectorizableFunctionsFromVecLib(llvm::VectorLibrary::LIBMVEC, TargetTriple); break; case VectorLibrary::MASSV: - TLII->addVectorizableFunctionsFromVecLib(TargetLibraryInfoImpl::MASSV, + TLII->addVectorizableFunctionsFromVecLib(llvm::VectorLibrary::MASSV, TargetTriple); break; case VectorLibrary::SVML: - TLII->addVectorizableFunctionsFromVecLib(TargetLibraryInfoImpl::SVML, + TLII->addVectorizableFunctionsFromVecLib(llvm::VectorLibrary::SVML, TargetTriple); break; case VectorLibrary::SLEEF: - TLII->addVectorizableFunctionsFromVecLib(TargetLibraryInfoImpl::SLEEFGNUABI, + TLII->addVectorizableFunctionsFromVecLib(llvm::VectorLibrary::SLEEFGNUABI, TargetTriple); break; case VectorLibrary::Darwin_libsystem_m: TLII->addVectorizableFunctionsFromVecLib( - TargetLibraryInfoImpl::DarwinLibSystemM, TargetTriple); + llvm::VectorLibrary::DarwinLibSystemM, TargetTriple); break; case 
VectorLibrary::ArmPL: - TLII->addVectorizableFunctionsFromVecLib(TargetLibraryInfoImpl::ArmPL, + TLII->addVectorizableFunctionsFromVecLib(llvm::VectorLibrary::ArmPL, TargetTriple); break; case VectorLibrary::AMDLIBM: - TLII->addVectorizableFunctionsFromVecLib(TargetLibraryInfoImpl::AMDLIBM, + TLII->addVectorizableFunctionsFromVecLib(llvm::VectorLibrary::AMDLIBM, TargetTriple); break; default: diff --git a/llvm/lib/Frontend/Offloading/OffloadWrapper.cpp b/llvm/lib/Frontend/Offloading/OffloadWrapper.cpp index c4aa2c7638450..45818deda8aa6 100644 --- a/llvm/lib/Frontend/Offloading/OffloadWrapper.cpp +++ b/llvm/lib/Frontend/Offloading/OffloadWrapper.cpp @@ -29,7 +29,6 @@ #include "llvm/Transforms/Utils/ModuleUtils.h" #include -#include #include using namespace llvm; diff --git a/llvm/lib/IR/CMakeLists.txt b/llvm/lib/IR/CMakeLists.txt index 10572ff708bd3..ebdc2ca08d102 100644 --- a/llvm/lib/IR/CMakeLists.txt +++ b/llvm/lib/IR/CMakeLists.txt @@ -67,6 +67,7 @@ add_llvm_component_library(LLVMCore ReplaceConstant.cpp Statepoint.cpp StructuralHash.cpp + SystemLibraries.cpp Type.cpp TypedPointerType.cpp TypeFinder.cpp diff --git a/llvm/lib/IR/DebugInfo.cpp b/llvm/lib/IR/DebugInfo.cpp index bf8aacc67cc10..ba3c039503720 100644 --- a/llvm/lib/IR/DebugInfo.cpp +++ b/llvm/lib/IR/DebugInfo.cpp @@ -247,7 +247,7 @@ void DebugInfoFinder::processType(DIType *DT) { } } -void DebugInfoFinder::processImportedEntity(DIImportedEntity *Import) { +void DebugInfoFinder::processImportedEntity(const DIImportedEntity *Import) { auto *Entity = Import->getEntity(); if (auto *T = dyn_cast(Entity)) processType(T); @@ -307,15 +307,13 @@ void DebugInfoFinder::processSubprogram(DISubprogram *SP) { } } - for (auto *N : SP->getRetainedNodes()) { - if (auto *Var = dyn_cast_or_null(N)) - processVariable(Var); - else if (auto *Import = dyn_cast_or_null(N)) - processImportedEntity(Import); - } + SP->forEachRetainedNode( + [this](const DILocalVariable *LV) { processVariable(LV); }, + [](const DILabel *L) {}, + [this](const DIImportedEntity *IE) { processImportedEntity(IE); }); } -void DebugInfoFinder::processVariable(DILocalVariable *DV) { +void DebugInfoFinder::processVariable(const DILocalVariable *DV) { if (!NodesSeen.insert(DV).second) return; processScope(DV->getScope()); diff --git a/llvm/lib/IR/DebugInfoMetadata.cpp b/llvm/lib/IR/DebugInfoMetadata.cpp index 0f9e24fd99a5c..c2975e68a4f06 100644 --- a/llvm/lib/IR/DebugInfoMetadata.cpp +++ b/llvm/lib/IR/DebugInfoMetadata.cpp @@ -1441,6 +1441,19 @@ bool DISubprogram::describes(const Function *F) const { assert(F && "Invalid function"); return F->getSubprogram() == this; } + +const DIScope *DISubprogram::getRawRetainedNodeScope(const MDNode *N) { + return visitRetainedNode( + N, [](const DILocalVariable *LV) { return LV->getScope(); }, + [](const DILabel *L) { return L->getScope(); }, + [](const DIImportedEntity *IE) { return IE->getScope(); }, + [](const Metadata *N) { return nullptr; }); +} + +const DILocalScope *DISubprogram::getRetainedNodeScope(const MDNode *N) { + return cast(getRawRetainedNodeScope(N)); +} + DILexicalBlockBase::DILexicalBlockBase(LLVMContext &C, unsigned ID, StorageType Storage, ArrayRef Ops) diff --git a/llvm/lib/IR/RuntimeLibcalls.cpp b/llvm/lib/IR/RuntimeLibcalls.cpp index f4c5c6ff35af6..e66b9adb43ac4 100644 --- a/llvm/lib/IR/RuntimeLibcalls.cpp +++ b/llvm/lib/IR/RuntimeLibcalls.cpp @@ -10,6 +10,7 @@ #include "llvm/ADT/FloatingPointMode.h" #include "llvm/ADT/StringTable.h" #include "llvm/IR/Module.h" +#include "llvm/IR/SystemLibraries.h" #include 
"llvm/Support/Debug.h" #include "llvm/Support/xxhash.h" #include "llvm/TargetParser/ARMTargetParser.h" @@ -25,6 +26,49 @@ using namespace RTLIB; #define DEFINE_GET_LOOKUP_LIBCALL_IMPL_NAME #include "llvm/IR/RuntimeLibcalls.inc" +RuntimeLibcallsInfo::RuntimeLibcallsInfo(const Triple &TT, + ExceptionHandling ExceptionModel, + FloatABI::ABIType FloatABI, + EABI EABIVersion, StringRef ABIName) { + // FIXME: The ExceptionModel parameter is to handle the field in + // TargetOptions. This interface fails to distinguish the forced disable + // case for targets which support exceptions by default. This should + // probably be a module flag and removed from TargetOptions. + if (ExceptionModel == ExceptionHandling::None) + ExceptionModel = TT.getDefaultExceptionHandling(); + + initLibcalls(TT, ExceptionModel, FloatABI, EABIVersion, ABIName); + + // TODO: Tablegen should generate these sets + switch (ClVectorLibrary) { + case VectorLibrary::SLEEFGNUABI: + for (RTLIB::LibcallImpl Impl : + {RTLIB::impl__ZGVnN2vl8l8_sincos, RTLIB::impl__ZGVnN4vl4l4_sincosf, + RTLIB::impl__ZGVsNxvl8l8_sincos, RTLIB::impl__ZGVsNxvl4l4_sincosf, + RTLIB::impl__ZGVnN4vl4l4_sincospif, RTLIB::impl__ZGVnN2vl8l8_sincospi, + RTLIB::impl__ZGVsNxvl4l4_sincospif, + RTLIB::impl__ZGVsNxvl8l8_sincospi}) + setAvailable(Impl); + break; + case VectorLibrary::ArmPL: + for (RTLIB::LibcallImpl Impl : + {RTLIB::impl_armpl_vsincosq_f64, RTLIB::impl_armpl_vsincosq_f32, + RTLIB::impl_armpl_svsincos_f64_x, RTLIB::impl_armpl_svsincos_f32_x, + RTLIB::impl_armpl_vsincospiq_f32, RTLIB::impl_armpl_vsincospiq_f64, + RTLIB::impl_armpl_svsincospi_f32_x, + RTLIB::impl_armpl_svsincospi_f64_x}) + setAvailable(Impl); + + for (RTLIB::LibcallImpl Impl : + {RTLIB::impl_armpl_vsincosq_f64, RTLIB::impl_armpl_vsincosq_f32}) + setLibcallImplCallingConv(Impl, CallingConv::AArch64_VectorCall); + + break; + default: + break; + } +} + RuntimeLibcallsInfo::RuntimeLibcallsInfo(const Module &M) : RuntimeLibcallsInfo(M.getTargetTriple()) { // TODO: Consider module flags @@ -88,6 +132,8 @@ RuntimeLibcallsInfo::getFunctionTy(LLVMContext &Ctx, const Triple &TT, static constexpr Attribute::AttrKind CommonFnAttrs[] = { Attribute::NoCallback, Attribute::NoFree, Attribute::NoSync, Attribute::NoUnwind, Attribute::WillReturn}; + static constexpr Attribute::AttrKind CommonPtrArgAttrs[] = { + Attribute::NoAlias, Attribute::WriteOnly, Attribute::NonNull}; switch (LibcallImpl) { case RTLIB::impl___sincos_stret: @@ -151,9 +197,86 @@ RuntimeLibcallsInfo::getFunctionTy(LLVMContext &Ctx, const Triple &TT, fcNegNormal)); return {FuncTy, Attrs}; } + case RTLIB::impl__ZGVnN2vl8l8_sincos: + case RTLIB::impl__ZGVnN4vl4l4_sincosf: + case RTLIB::impl__ZGVsNxvl8l8_sincos: + case RTLIB::impl__ZGVsNxvl4l4_sincosf: + case RTLIB::impl_armpl_vsincosq_f64: + case RTLIB::impl_armpl_vsincosq_f32: + case RTLIB::impl_armpl_svsincos_f64_x: + case RTLIB::impl_armpl_svsincos_f32_x: + case RTLIB::impl__ZGVnN4vl4l4_sincospif: + case RTLIB::impl__ZGVnN2vl8l8_sincospi: + case RTLIB::impl__ZGVsNxvl4l4_sincospif: + case RTLIB::impl__ZGVsNxvl8l8_sincospi: + case RTLIB::impl_armpl_vsincospiq_f32: + case RTLIB::impl_armpl_vsincospiq_f64: + case RTLIB::impl_armpl_svsincospi_f32_x: + case RTLIB::impl_armpl_svsincospi_f64_x: { + AttrBuilder FuncAttrBuilder(Ctx); + + bool IsF32 = LibcallImpl == RTLIB::impl__ZGVnN4vl4l4_sincospif || + LibcallImpl == RTLIB::impl__ZGVsNxvl4l4_sincospif || + LibcallImpl == RTLIB::impl_armpl_vsincospiq_f32 || + LibcallImpl == RTLIB::impl_armpl_svsincospi_f32_x || + LibcallImpl == 
RTLIB::impl__ZGVnN4vl4l4_sincosf || + LibcallImpl == RTLIB::impl__ZGVsNxvl4l4_sincosf || + LibcallImpl == RTLIB::impl_armpl_vsincosq_f32 || + LibcallImpl == RTLIB::impl_armpl_svsincos_f32_x; + + Type *ScalarTy = IsF32 ? Type::getFloatTy(Ctx) : Type::getDoubleTy(Ctx); + unsigned EC = IsF32 ? 4 : 2; + + bool IsScalable = LibcallImpl == RTLIB::impl__ZGVsNxvl8l8_sincos || + LibcallImpl == RTLIB::impl__ZGVsNxvl4l4_sincosf || + LibcallImpl == RTLIB::impl_armpl_svsincos_f32_x || + LibcallImpl == RTLIB::impl_armpl_svsincos_f64_x || + LibcallImpl == RTLIB::impl__ZGVsNxvl4l4_sincospif || + LibcallImpl == RTLIB::impl__ZGVsNxvl8l8_sincospi || + LibcallImpl == RTLIB::impl_armpl_svsincospi_f32_x || + LibcallImpl == RTLIB::impl_armpl_svsincospi_f64_x; + VectorType *VecTy = VectorType::get(ScalarTy, EC, IsScalable); + + for (Attribute::AttrKind Attr : CommonFnAttrs) + FuncAttrBuilder.addAttribute(Attr); + FuncAttrBuilder.addMemoryAttr(MemoryEffects::argMemOnly(ModRefInfo::Mod)); + + AttributeList Attrs; + Attrs = Attrs.addFnAttributes(Ctx, FuncAttrBuilder); + + { + AttrBuilder ArgAttrBuilder(Ctx); + for (Attribute::AttrKind AK : CommonPtrArgAttrs) + ArgAttrBuilder.addAttribute(AK); + ArgAttrBuilder.addAlignmentAttr(DL.getABITypeAlign(VecTy)); + Attrs = Attrs.addParamAttributes(Ctx, 1, ArgAttrBuilder); + Attrs = Attrs.addParamAttributes(Ctx, 2, ArgAttrBuilder); + } + + PointerType *PtrTy = PointerType::get(Ctx, 0); + SmallVector ArgTys = {VecTy, PtrTy, PtrTy}; + if (hasVectorMaskArgument(LibcallImpl)) + ArgTys.push_back(VectorType::get(Type::getInt1Ty(Ctx), EC, IsScalable)); + + return {FunctionType::get(Type::getVoidTy(Ctx), ArgTys, false), Attrs}; + } default: return {}; } return {}; } + +bool RuntimeLibcallsInfo::hasVectorMaskArgument(RTLIB::LibcallImpl Impl) { + /// FIXME: This should be generated by tablegen and support the argument at an + /// arbitrary position + switch (Impl) { + case RTLIB::impl_armpl_svsincos_f32_x: + case RTLIB::impl_armpl_svsincos_f64_x: + case RTLIB::impl_armpl_svsincospi_f32_x: + case RTLIB::impl_armpl_svsincospi_f64_x: + return true; + default: + return false; + } +} diff --git a/llvm/lib/IR/SystemLibraries.cpp b/llvm/lib/IR/SystemLibraries.cpp new file mode 100644 index 0000000000000..fa4ac2adb7296 --- /dev/null +++ b/llvm/lib/IR/SystemLibraries.cpp @@ -0,0 +1,34 @@ +//===-----------------------------------------------------------------------==// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "llvm/IR/SystemLibraries.h" +#include "llvm/Support/CommandLine.h" + +using namespace llvm; + +VectorLibrary llvm::ClVectorLibrary; + +static cl::opt ClVectorLibraryOpt( + "vector-library", cl::Hidden, cl::desc("Vector functions library"), + cl::location(llvm::ClVectorLibrary), cl::init(VectorLibrary::NoLibrary), + cl::values( + clEnumValN(VectorLibrary::NoLibrary, "none", + "No vector functions library"), + clEnumValN(VectorLibrary::Accelerate, "Accelerate", + "Accelerate framework"), + clEnumValN(VectorLibrary::DarwinLibSystemM, "Darwin_libsystem_m", + "Darwin libsystem_m"), + clEnumValN(VectorLibrary::LIBMVEC, "LIBMVEC", + "GLIBC Vector Math library"), + clEnumValN(VectorLibrary::MASSV, "MASSV", "IBM MASS vector library"), + clEnumValN(VectorLibrary::SVML, "SVML", "Intel SVML library"), + clEnumValN(VectorLibrary::SLEEFGNUABI, "sleefgnuabi", + "SIMD Library for Evaluating Elementary Functions"), + clEnumValN(VectorLibrary::ArmPL, "ArmPL", "Arm Performance Libraries"), + clEnumValN(VectorLibrary::AMDLIBM, "AMDLIBM", + "AMD vector math library"))); diff --git a/llvm/lib/IR/Verifier.cpp b/llvm/lib/IR/Verifier.cpp index 427d91afe8c93..6031659e4002e 100644 --- a/llvm/lib/IR/Verifier.cpp +++ b/llvm/lib/IR/Verifier.cpp @@ -1575,11 +1575,27 @@ void Verifier::visitDISubprogram(const DISubprogram &N) { auto *Node = dyn_cast(RawNode); CheckDI(Node, "invalid retained nodes list", &N, RawNode); for (Metadata *Op : Node->operands()) { - CheckDI(Op && (isa(Op) || isa(Op) || - isa(Op)), + CheckDI(Op, "nullptr in retained nodes", &N, Node); + + auto True = [](const Metadata *) { return true; }; + auto False = [](const Metadata *) { return false; }; + bool IsTypeCorrect = + DISubprogram::visitRetainedNode(Op, True, True, True, False); + CheckDI(IsTypeCorrect, "invalid retained nodes, expected DILocalVariable, DILabel or " "DIImportedEntity", &N, Node, Op); + + auto *RetainedNode = cast(Op); + auto *RetainedNodeScope = dyn_cast_or_null( + DISubprogram::getRawRetainedNodeScope(RetainedNode)); + CheckDI(RetainedNodeScope, + "invalid retained nodes, retained node is not local", &N, Node, + RetainedNode); + CheckDI( + RetainedNodeScope->getSubprogram() == &N, + "invalid retained nodes, retained node does not belong to subprogram", + &N, Node, RetainedNode, RetainedNodeScope); } } CheckDI(!hasConflictingReferenceFlags(N.getFlags()), diff --git a/llvm/lib/Passes/PassBuilder.cpp b/llvm/lib/Passes/PassBuilder.cpp index e0babc4385aab..0d190ea448931 100644 --- a/llvm/lib/Passes/PassBuilder.cpp +++ b/llvm/lib/Passes/PassBuilder.cpp @@ -45,7 +45,6 @@ #include "llvm/Analysis/IR2Vec.h" #include "llvm/Analysis/IVUsers.h" #include "llvm/Analysis/InlineAdvisor.h" -#include "llvm/Analysis/InlineSizeEstimatorAnalysis.h" #include "llvm/Analysis/InstCount.h" #include "llvm/Analysis/KernelInfo.h" #include "llvm/Analysis/LastRunTrackingAnalysis.h" diff --git a/llvm/lib/Passes/PassRegistry.def b/llvm/lib/Passes/PassRegistry.def index 0d278a28a88a7..2bd1b385a61c8 100644 --- a/llvm/lib/Passes/PassRegistry.def +++ b/llvm/lib/Passes/PassRegistry.def @@ -366,7 +366,6 @@ FUNCTION_ANALYSIS("ephemerals", EphemeralValuesAnalysis()) FUNCTION_ANALYSIS("func-properties", FunctionPropertiesAnalysis()) FUNCTION_ANALYSIS("machine-function-info", MachineFunctionAnalysis(*TM)) FUNCTION_ANALYSIS("gc-function", GCFunctionAnalysis()) -FUNCTION_ANALYSIS("inliner-size-estimator", 
InlineSizeEstimatorAnalysis()) FUNCTION_ANALYSIS("last-run-tracking", LastRunTrackingAnalysis()) FUNCTION_ANALYSIS("lazy-value-info", LazyValueAnalysis()) FUNCTION_ANALYSIS("loops", LoopAnalysis()) @@ -522,8 +521,6 @@ FUNCTION_PASS("print", DominanceFrontierPrinterPass(errs())) FUNCTION_PASS("print", DominatorTreePrinterPass(errs())) FUNCTION_PASS("print", FunctionPropertiesPrinterPass(errs())) FUNCTION_PASS("print", InlineCostAnnotationPrinterPass(errs())) -FUNCTION_PASS("print", - InlineSizeEstimatorAnalysisPrinterPass(errs())) FUNCTION_PASS("print", LazyValueInfoPrinterPass(errs())) FUNCTION_PASS("print", LoopPrinterPass(errs())) FUNCTION_PASS("print", MemorySSAWalkerPrinterPass(errs())) diff --git a/llvm/lib/Support/Mustache.cpp b/llvm/lib/Support/Mustache.cpp index 6c140be59fc4b..8b95049eb9648 100644 --- a/llvm/lib/Support/Mustache.cpp +++ b/llvm/lib/Support/Mustache.cpp @@ -10,7 +10,6 @@ #include "llvm/Support/Debug.h" #include "llvm/Support/raw_ostream.h" #include -#include #include #define DEBUG_TYPE "mustache" diff --git a/llvm/lib/Support/Windows/Program.inc b/llvm/lib/Support/Windows/Program.inc index ec785e407cc57..5dcd2c945bc85 100644 --- a/llvm/lib/Support/Windows/Program.inc +++ b/llvm/lib/Support/Windows/Program.inc @@ -23,7 +23,6 @@ #include #include #include -#include #include //===----------------------------------------------------------------------===// diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp index 4b4073365483e..55a04cca4c394 100644 --- a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp +++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp @@ -9590,6 +9590,27 @@ AArch64InstrInfo::getOutliningCandidateInfo( unsigned NumBytesToCreateFrame = 0; + // Avoid splitting ADRP ADD/LDR pair into outlined functions. + // These instructions are fused together by the scheduler. + // Any candidate where ADRP is the last instruction should be rejected + // as that will lead to splitting ADRP pair. + MachineInstr &LastMI = RepeatedSequenceLocs[0].back(); + MachineInstr &FirstMI = RepeatedSequenceLocs[0].front(); + if (LastMI.getOpcode() == AArch64::ADRP && + (LastMI.getOperand(1).getTargetFlags() & AArch64II::MO_PAGE) != 0 && + (LastMI.getOperand(1).getTargetFlags() & AArch64II::MO_GOT) != 0) { + return std::nullopt; + } + + // Similarly any candidate where the first instruction is ADD/LDR with a + // page offset should be rejected to avoid ADRP splitting. + if ((FirstMI.getOpcode() == AArch64::ADDXri || + FirstMI.getOpcode() == AArch64::LDRXui) && + (FirstMI.getOperand(2).getTargetFlags() & AArch64II::MO_PAGEOFF) != 0 && + (FirstMI.getOperand(2).getTargetFlags() & AArch64II::MO_GOT) != 0) { + return std::nullopt; + } + // We only allow outlining for functions having exactly matching return // address signing attributes, i.e., all share the same value for the // attribute "sign-return-address" and all share the same type of key they diff --git a/llvm/lib/Target/AArch64/AArch64RegisterInfo.cpp b/llvm/lib/Target/AArch64/AArch64RegisterInfo.cpp index a5048b9c9e61d..eaf8723094797 100644 --- a/llvm/lib/Target/AArch64/AArch64RegisterInfo.cpp +++ b/llvm/lib/Target/AArch64/AArch64RegisterInfo.cpp @@ -1123,24 +1123,85 @@ unsigned AArch64RegisterInfo::getRegPressureLimit(const TargetRegisterClass *RC, } } -// FORM_TRANSPOSED_REG_TUPLE nodes are created to improve register allocation -// where a consecutive multi-vector tuple is constructed from the same indices -// of multiple strided loads. 
This may still result in unnecessary copies -// between the loads and the tuple. Here we try to return a hint to assign the -// contiguous ZPRMulReg starting at the same register as the first operand of -// the pseudo, which should be a subregister of the first strided load. +// We add regalloc hints for different cases: +// * Choosing a better destination operand for predicated SVE instructions +// where the inactive lanes are undef, by choosing a register that is not +// unique to the other operands of the instruction. // -// For example, if the first strided load has been assigned $z16_z20_z24_z28 -// and the operands of the pseudo are each accessing subregister zsub2, we -// should look through through Order to find a contiguous register which -// begins with $z24 (i.e. $z24_z25_z26_z27). +// * Improve register allocation for SME multi-vector instructions where we can +// benefit from the strided- and contiguous register multi-vector tuples. // +// Here FORM_TRANSPOSED_REG_TUPLE nodes are created to improve register +// allocation where a consecutive multi-vector tuple is constructed from the +// same indices of multiple strided loads. This may still result in +// unnecessary copies between the loads and the tuple. Here we try to return a +// hint to assign the contiguous ZPRMulReg starting at the same register as +// the first operand of the pseudo, which should be a subregister of the first +// strided load. +// +// For example, if the first strided load has been assigned $z16_z20_z24_z28 +// and the operands of the pseudo are each accessing subregister zsub2, we +// should look through through Order to find a contiguous register which +// begins with $z24 (i.e. $z24_z25_z26_z27). bool AArch64RegisterInfo::getRegAllocationHints( Register VirtReg, ArrayRef Order, SmallVectorImpl &Hints, const MachineFunction &MF, const VirtRegMap *VRM, const LiveRegMatrix *Matrix) const { - auto &ST = MF.getSubtarget(); + const AArch64InstrInfo *TII = + MF.getSubtarget().getInstrInfo(); + const MachineRegisterInfo &MRI = MF.getRegInfo(); + + // For predicated SVE instructions where the inactive lanes are undef, + // pick a destination register that is not unique to avoid introducing + // a movprfx. + const TargetRegisterClass *RegRC = MRI.getRegClass(VirtReg); + if (AArch64::ZPRRegClass.hasSubClassEq(RegRC)) { + for (const MachineOperand &DefOp : MRI.def_operands(VirtReg)) { + const MachineInstr &Def = *DefOp.getParent(); + if (DefOp.isImplicit() || + (TII->get(Def.getOpcode()).TSFlags & AArch64::FalseLanesMask) != + AArch64::FalseLanesUndef) + continue; + + unsigned InstFlags = + TII->get(AArch64::getSVEPseudoMap(Def.getOpcode())).TSFlags; + + for (MCPhysReg R : Order) { + auto AddHintIfSuitable = [&](MCPhysReg R, const MachineOperand &MO) { + // R is a suitable register hint if there exists an operand for the + // instruction that is not yet allocated a register or if R matches + // one of the other source operands. 
+ if (!VRM->hasPhys(MO.getReg()) || VRM->getPhys(MO.getReg()) == R) + Hints.push_back(R); + }; + + switch (InstFlags & AArch64::DestructiveInstTypeMask) { + default: + break; + case AArch64::DestructiveTernaryCommWithRev: + AddHintIfSuitable(R, Def.getOperand(2)); + AddHintIfSuitable(R, Def.getOperand(3)); + AddHintIfSuitable(R, Def.getOperand(4)); + break; + case AArch64::DestructiveBinaryComm: + case AArch64::DestructiveBinaryCommWithRev: + AddHintIfSuitable(R, Def.getOperand(2)); + AddHintIfSuitable(R, Def.getOperand(3)); + break; + case AArch64::DestructiveBinary: + case AArch64::DestructiveBinaryImm: + AddHintIfSuitable(R, Def.getOperand(2)); + break; + } + } + } + + if (Hints.size()) + return TargetRegisterInfo::getRegAllocationHints(VirtReg, Order, Hints, + MF, VRM); + } + if (!ST.hasSME() || !ST.isStreaming()) return TargetRegisterInfo::getRegAllocationHints(VirtReg, Order, Hints, MF, VRM); @@ -1153,8 +1214,7 @@ bool AArch64RegisterInfo::getRegAllocationHints( // FORM_TRANSPOSED_REG_TUPLE pseudo, we want to favour reducing copy // instructions over reducing the number of clobbered callee-save registers, // so we add the strided registers as a hint. - const MachineRegisterInfo &MRI = MF.getRegInfo(); - unsigned RegID = MRI.getRegClass(VirtReg)->getID(); + unsigned RegID = RegRC->getID(); if (RegID == AArch64::ZPR2StridedOrContiguousRegClassID || RegID == AArch64::ZPR4StridedOrContiguousRegClassID) { diff --git a/llvm/lib/Target/AArch64/AArch64TargetMachine.cpp b/llvm/lib/Target/AArch64/AArch64TargetMachine.cpp index 068954f1764fb..0bf2b31b10846 100644 --- a/llvm/lib/Target/AArch64/AArch64TargetMachine.cpp +++ b/llvm/lib/Target/AArch64/AArch64TargetMachine.cpp @@ -54,7 +54,6 @@ #include "llvm/Transforms/Vectorize/LoopIdiomVectorize.h" #include #include -#include using namespace llvm; diff --git a/llvm/lib/Target/AMDGPU/GCNSubtarget.h b/llvm/lib/Target/AMDGPU/GCNSubtarget.h index f377b8aaf1333..da4bd878b8853 100644 --- a/llvm/lib/Target/AMDGPU/GCNSubtarget.h +++ b/llvm/lib/Target/AMDGPU/GCNSubtarget.h @@ -1040,6 +1040,8 @@ class GCNSubtarget final : public AMDGPUGenSubtargetInfo, return true; } + bool enableTerminalRule() const override { return true; } + bool useAA() const override; bool enableSubRegLiveness() const override { diff --git a/llvm/lib/Target/AMDGPU/R600Subtarget.h b/llvm/lib/Target/AMDGPU/R600Subtarget.h index 22e56b66e1827..efd99dbc1a08b 100644 --- a/llvm/lib/Target/AMDGPU/R600Subtarget.h +++ b/llvm/lib/Target/AMDGPU/R600Subtarget.h @@ -126,6 +126,8 @@ class R600Subtarget final : public R600GenSubtargetInfo, return true; } + bool enableTerminalRule() const override { return true; } + bool enableSubRegLiveness() const override { return true; } diff --git a/llvm/lib/Target/AVR/AVRTargetTransformInfo.h b/llvm/lib/Target/AVR/AVRTargetTransformInfo.h index 0daeeb8f11cfe..338a7c8082ca3 100644 --- a/llvm/lib/Target/AVR/AVRTargetTransformInfo.h +++ b/llvm/lib/Target/AVR/AVRTargetTransformInfo.h @@ -21,7 +21,6 @@ #include "llvm/Analysis/TargetTransformInfo.h" #include "llvm/CodeGen/BasicTTIImpl.h" #include "llvm/IR/Function.h" -#include namespace llvm { diff --git a/llvm/lib/Target/DirectX/DXContainerGlobals.cpp b/llvm/lib/Target/DirectX/DXContainerGlobals.cpp index 677203d1c016b..95577dd668e1e 100644 --- a/llvm/lib/Target/DirectX/DXContainerGlobals.cpp +++ b/llvm/lib/Target/DirectX/DXContainerGlobals.cpp @@ -29,7 +29,6 @@ #include "llvm/TargetParser/Triple.h" #include "llvm/Transforms/Utils/ModuleUtils.h" #include -#include using namespace llvm; using namespace llvm::dxil; 
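The terminal-rule pieces of this patch compose a common LLVM idiom: the -terminal-rule flag became a three-state cl::boolOrDefault in RegisterCoalescer, and when left unset the decision defers to the new subtarget hook enableTerminalRule(), which the GCN and R600 subtargets above override to return true. A condensed sketch of the idiom with placeholder names, assuming it is built against LLVM's CommandLine headers:

#include "llvm/Support/CommandLine.h"

using namespace llvm;

// Three states: explicitly on, explicitly off, or "let the target decide".
static cl::opt<cl::boolOrDefault>
    EnableFeatureFlag("enable-feature", cl::desc("Force the feature on/off"),
                      cl::init(cl::BOU_UNSET), cl::Hidden);

struct Subtarget {
  // Targets override this to opt in by default.
  virtual bool enableFeature() const { return false; }
  virtual ~Subtarget() = default;
};

bool shouldUseFeature(const Subtarget &ST) {
  if (EnableFeatureFlag == cl::BOU_UNSET)
    return ST.enableFeature(); // no explicit request: target default
  return EnableFeatureFlag == cl::BOU_TRUE;
}

The flag keeps its old spelling, so -terminal-rule=1 and -terminal-rule=0 still force the behavior either way for testing; only the meaning of the unset default changed.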
diff --git a/llvm/lib/Target/DirectX/DXILRootSignature.h b/llvm/lib/Target/DirectX/DXILRootSignature.h index b990b6c7410ac..ec82aa93dd07c 100644 --- a/llvm/lib/Target/DirectX/DXILRootSignature.h +++ b/llvm/lib/Target/DirectX/DXILRootSignature.h @@ -21,7 +21,6 @@ #include "llvm/IR/PassManager.h" #include "llvm/MC/DXContainerRootSignature.h" #include "llvm/Pass.h" -#include namespace llvm { namespace dxil { diff --git a/llvm/lib/Target/DirectX/DXILWriter/DXILBitcodeWriter.h b/llvm/lib/Target/DirectX/DXILWriter/DXILBitcodeWriter.h index f2c00c7320e11..7cbc092ea3525 100644 --- a/llvm/lib/Target/DirectX/DXILWriter/DXILBitcodeWriter.h +++ b/llvm/lib/Target/DirectX/DXILWriter/DXILBitcodeWriter.h @@ -19,7 +19,6 @@ #include "llvm/Support/Allocator.h" #include "llvm/Support/MemoryBufferRef.h" #include -#include #include namespace llvm { diff --git a/llvm/lib/Target/M68k/M68kSubtarget.h b/llvm/lib/Target/M68k/M68kSubtarget.h index 16ca7d2e6d0fd..4f9685814d9a9 100644 --- a/llvm/lib/Target/M68k/M68kSubtarget.h +++ b/llvm/lib/Target/M68k/M68kSubtarget.h @@ -27,8 +27,6 @@ #include "llvm/MC/MCInstrItineraries.h" #include "llvm/Support/Alignment.h" -#include - #define GET_SUBTARGETINFO_HEADER #include "M68kGenSubtargetInfo.inc" diff --git a/llvm/lib/Target/PowerPC/PPCSubtarget.h b/llvm/lib/Target/PowerPC/PPCSubtarget.h index f275802fe1843..7d933588025fe 100644 --- a/llvm/lib/Target/PowerPC/PPCSubtarget.h +++ b/llvm/lib/Target/PowerPC/PPCSubtarget.h @@ -23,7 +23,6 @@ #include "llvm/IR/DataLayout.h" #include "llvm/MC/MCInstrItineraries.h" #include "llvm/TargetParser/Triple.h" -#include #define GET_SUBTARGETINFO_HEADER #include "PPCGenSubtargetInfo.inc" diff --git a/llvm/lib/Target/SPIRV/SPIRVModuleAnalysis.cpp b/llvm/lib/Target/SPIRV/SPIRVModuleAnalysis.cpp index fbb127df16dd9..b8cd9c1358f00 100644 --- a/llvm/lib/Target/SPIRV/SPIRVModuleAnalysis.cpp +++ b/llvm/lib/Target/SPIRV/SPIRVModuleAnalysis.cpp @@ -249,17 +249,18 @@ static InstrSignature instrToSignature(const MachineInstr &MI, InstrSignature Signature{MI.getOpcode()}; for (unsigned i = 0; i < MI.getNumOperands(); ++i) { // The only decorations that can be applied more than once to a given - // or structure member are UserSemantic(5635), CacheControlLoadINTEL (6442), - // and CacheControlStoreINTEL (6443). For all the rest of decorations, we - // will only add to the signature the Opcode, the id to which it applies, - // and the decoration id, disregarding any decoration flags. This will - // ensure that any subsequent decoration with the same id will be deemed as - // a duplicate. Then, at the call site, we will be able to handle duplicates - // in the best way. + // or structure member are FuncParamAttr (38), UserSemantic (5635), + // CacheControlLoadINTEL (6442), and CacheControlStoreINTEL (6443). For all + // the rest of decorations, we will only add to the signature the Opcode, + // the id to which it applies, and the decoration id, disregarding any + // decoration flags. This will ensure that any subsequent decoration with + // the same id will be deemed as a duplicate. Then, at the call site, we + // will be able to handle duplicates in the best way. 
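To make the quoted rule concrete before the code that implements it: decorations other than the repeatable ones are truncated to (opcode, target id, decoration id) when forming an instruction signature, so a second occurrence of the same decoration collides with the first and is handled as a duplicate. A toy version with made-up types; the decoration numbers are the ones quoted in the comment above:

#include <cstdint>
#include <set>
#include <vector>

using InstrSignature = std::vector<uint64_t>;

struct Instr {
  unsigned Opcode;
  std::vector<uint64_t> Ops; // operand immediates, heavily simplified
};

// Decoration ids that may legally repeat on one target: FuncParamAttr (38),
// UserSemantic (5635), CacheControlLoadINTEL (6442),
// CacheControlStoreINTEL (6443).
bool mayRepeat(uint64_t DecorationID) {
  return DecorationID == 38 || DecorationID == 5635 ||
         DecorationID == 6442 || DecorationID == 6443;
}

InstrSignature signature(const Instr &MI, bool IsDecorate) {
  InstrSignature Sig{MI.Opcode};
  for (size_t I = 0; I < MI.Ops.size(); ++I) {
    // For non-repeatable decorations, stop after the decoration id so any
    // trailing flags are ignored and a later decoration of the same kind
    // produces an identical signature.
    if (IsDecorate && I >= 2 && !mayRepeat(MI.Ops[1]))
      break;
    Sig.push_back(MI.Ops[I]);
  }
  return Sig;
}

// A failed insert into the seen-set flags a duplicate to merge or drop.
bool isDuplicate(std::set<InstrSignature> &Seen, const Instr &MI,
                 bool IsDecorate) {
  return !Seen.insert(signature(MI, IsDecorate)).second;
}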
unsigned Opcode = MI.getOpcode(); if ((Opcode == SPIRV::OpDecorate) && i >= 2) { unsigned DecorationID = MI.getOperand(1).getImm(); - if (DecorationID != SPIRV::Decoration::UserSemantic && + if (DecorationID != SPIRV::Decoration::FuncParamAttr && + DecorationID != SPIRV::Decoration::UserSemantic && DecorationID != SPIRV::Decoration::CacheControlLoadINTEL && DecorationID != SPIRV::Decoration::CacheControlStoreINTEL) continue; diff --git a/llvm/lib/Target/Sparc/SparcSubtarget.h b/llvm/lib/Target/Sparc/SparcSubtarget.h index b1decca0a4f07..f575f6d7da37f 100644 --- a/llvm/lib/Target/Sparc/SparcSubtarget.h +++ b/llvm/lib/Target/Sparc/SparcSubtarget.h @@ -21,7 +21,6 @@ #include "llvm/IR/DataLayout.h" #include "llvm/Support/ErrorHandling.h" #include "llvm/TargetParser/Triple.h" -#include #define GET_SUBTARGETINFO_HEADER #include "SparcGenSubtargetInfo.inc" diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp index 05a854a0bf3fa..5bce539c45341 100644 --- a/llvm/lib/Target/X86/X86ISelLowering.cpp +++ b/llvm/lib/Target/X86/X86ISelLowering.cpp @@ -635,6 +635,7 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM, setOperationAction(ISD::FROUNDEVEN, VT, Action); setOperationAction(ISD::FTRUNC, VT, Action); setOperationAction(ISD::FLDEXP, VT, Action); + setOperationAction(ISD::FSINCOSPI, VT, Action); }; if (!Subtarget.useSoftFloat() && Subtarget.hasSSE2()) { diff --git a/llvm/lib/Target/X86/X86TargetMachine.cpp b/llvm/lib/Target/X86/X86TargetMachine.cpp index 0c2bd7c302f33..d4ad98af9b30c 100644 --- a/llvm/lib/Target/X86/X86TargetMachine.cpp +++ b/llvm/lib/Target/X86/X86TargetMachine.cpp @@ -50,7 +50,6 @@ #include "llvm/Transforms/CFGuard.h" #include #include -#include using namespace llvm; diff --git a/llvm/lib/Transforms/Instrumentation/InstrProfiling.cpp b/llvm/lib/Transforms/Instrumentation/InstrProfiling.cpp index c06f33f726e76..c37e530f530f5 100644 --- a/llvm/lib/Transforms/Instrumentation/InstrProfiling.cpp +++ b/llvm/lib/Transforms/Instrumentation/InstrProfiling.cpp @@ -1945,6 +1945,10 @@ void InstrLowerer::emitNameData() { NamesVar = new GlobalVariable(M, NamesVal->getType(), true, GlobalValue::PrivateLinkage, NamesVal, getInstrProfNamesVarName()); + if (isGPUProfTarget(M)) { + NamesVar->setLinkage(GlobalValue::ExternalLinkage); + NamesVar->setVisibility(GlobalValue::ProtectedVisibility); + } NamesSize = CompressedNameStr.size(); setGlobalVariableLargeSection(TT, *NamesVar); diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp index 45b5570261416..566d6eafee63e 100644 --- a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp +++ b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp @@ -1232,6 +1232,30 @@ class LoopVectorizationCostModel { /// Superset of instructions that return true for isScalarWithPredication. bool isPredicatedInst(Instruction *I) const; + /// A helper function that returns how much we should divide the cost of a + /// predicated block by. Typically this is the reciprocal of the block + /// probability, i.e. if we return X we are assuming the predicated block will + /// execute once for every X iterations of the loop header so the block should + /// only contribute 1/X of its cost to the total cost calculation, but when + /// optimizing for code size it will just be 1 as code size costs don't depend + /// on execution probabilities. + /// + /// TODO: We should use actual block probability here, if available. 
+ /// Currently, we always assume predicated blocks have a 50% chance of + /// executing, apart from blocks that are only predicated due to tail folding. + inline unsigned + getPredBlockCostDivisor(TargetTransformInfo::TargetCostKind CostKind, + BasicBlock *BB) const { + // If a block wasn't originally predicated but was predicated due to + // e.g. tail folding, don't divide the cost. Tail folded loops may still be + // predicated in the final vector loop iteration, but for most loops that + // don't have low trip counts we can expect their probability to be close to + // zero. + if (!Legal->blockNeedsPredication(BB)) + return 1; + return CostKind == TTI::TCK_CodeSize ? 1 : 2; + } + /// Return the costs for our two available strategies for lowering a /// div/rem operation which requires speculating at least one lane. /// First result is for scalarization (will be invalid for scalable @@ -2887,7 +2911,8 @@ LoopVectorizationCostModel::getDivRemSpeculationCost(Instruction *I, // Scale the cost by the probability of executing the predicated blocks. // This assumes the predicated block for each vector lane is equally // likely. - ScalarizationCost = ScalarizationCost / getPredBlockCostDivisor(CostKind); + ScalarizationCost = + ScalarizationCost / getPredBlockCostDivisor(CostKind, I->getParent()); } InstructionCost SafeDivisorCost = 0; @@ -5032,7 +5057,7 @@ InstructionCost LoopVectorizationCostModel::computePredInstDiscount( } // Scale the total scalar cost by block probability. - ScalarCost /= getPredBlockCostDivisor(CostKind); + ScalarCost /= getPredBlockCostDivisor(CostKind, I->getParent()); // Compute the discount. A non-negative discount means the vector version // of the instruction costs more, and scalarizing would be beneficial. @@ -5082,10 +5107,11 @@ InstructionCost LoopVectorizationCostModel::expectedCost(ElementCount VF) { // stores and instructions that may divide by zero) will now be // unconditionally executed. For the scalar case, we may not always execute // the predicated block, if it is an if-else block. Thus, scale the block's - // cost by the probability of executing it. blockNeedsPredication from - // Legal is used so as to not include all blocks in tail folded loops. - if (VF.isScalar() && Legal->blockNeedsPredication(BB)) - BlockCost /= getPredBlockCostDivisor(CostKind); + // cost by the probability of executing it. + // getPredBlockCostDivisor will return 1 for blocks that are only predicated + // by the header mask when folding the tail. + if (VF.isScalar()) + BlockCost /= getPredBlockCostDivisor(CostKind, BB); Cost += BlockCost; } @@ -5164,7 +5190,7 @@ LoopVectorizationCostModel::getMemInstScalarizationCost(Instruction *I, // conditional branches, but may not be executed for each vector lane. Scale // the cost by the probability of executing the predicated block. 
if (isPredicatedInst(I)) { - Cost /= getPredBlockCostDivisor(CostKind); + Cost /= getPredBlockCostDivisor(CostKind, I->getParent()); // Add the cost of an i1 extract and a branch auto *VecI1Ty = @@ -6732,6 +6758,10 @@ bool VPCostContext::skipCostComputation(Instruction *UI, bool IsVector) const { SkipCostComputation.contains(UI); } +unsigned VPCostContext::getPredBlockCostDivisor(BasicBlock *BB) const { + return CM.getPredBlockCostDivisor(CostKind, BB); +} + InstructionCost LoopVectorizationPlanner::precomputeCosts(VPlan &Plan, ElementCount VF, VPCostContext &CostCtx) const { diff --git a/llvm/lib/Transforms/Vectorize/VPlan.h b/llvm/lib/Transforms/Vectorize/VPlan.h index 5851b3ab7978c..72858e1265d86 100644 --- a/llvm/lib/Transforms/Vectorize/VPlan.h +++ b/llvm/lib/Transforms/Vectorize/VPlan.h @@ -959,6 +959,11 @@ class VPIRMetadata { /// Add metadata with kind \p Kind and \p Node. void addMetadata(unsigned Kind, MDNode *Node) { + assert(none_of(Metadata, + [Kind](const std::pair &P) { + return P.first == Kind; + }) && + "Kind must appear at most once in Metadata"); Metadata.emplace_back(Kind, Node); } diff --git a/llvm/lib/Transforms/Vectorize/VPlanHelpers.h b/llvm/lib/Transforms/Vectorize/VPlanHelpers.h index 965426f86ff21..caabfa7275b69 100644 --- a/llvm/lib/Transforms/Vectorize/VPlanHelpers.h +++ b/llvm/lib/Transforms/Vectorize/VPlanHelpers.h @@ -50,21 +50,6 @@ Value *getRuntimeVF(IRBuilderBase &B, Type *Ty, ElementCount VF); Value *createStepForVF(IRBuilderBase &B, Type *Ty, ElementCount VF, int64_t Step); -/// A helper function that returns how much we should divide the cost of a -/// predicated block by. Typically this is the reciprocal of the block -/// probability, i.e. if we return X we are assuming the predicated block will -/// execute once for every X iterations of the loop header so the block should -/// only contribute 1/X of its cost to the total cost calculation, but when -/// optimizing for code size it will just be 1 as code size costs don't depend -/// on execution probabilities. -/// -/// TODO: We should use actual block probability here, if available. Currently, -/// we always assume predicated blocks have a 50% chance of executing. -inline unsigned -getPredBlockCostDivisor(TargetTransformInfo::TargetCostKind CostKind) { - return CostKind == TTI::TCK_CodeSize ? 1 : 2; -} - /// A range of powers-of-2 vectorization factors with fixed start and /// adjustable end. The range includes start and excludes end, e.g.,: /// [1, 16) = {1, 2, 4, 8} @@ -367,6 +352,10 @@ struct VPCostContext { /// has already been pre-computed. bool skipCostComputation(Instruction *UI, bool IsVector) const; + /// \returns how much the cost of a predicated block should be divided by. + /// Forwards to LoopVectorizationCostModel::getPredBlockCostDivisor. + unsigned getPredBlockCostDivisor(BasicBlock *BB) const; + /// Returns the OperandInfo for \p V, if it is a live-in. TargetTransformInfo::OperandValueInfo getOperandInfo(VPValue *V) const; diff --git a/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp b/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp index 80cd112dbcd8a..707886f873fba 100644 --- a/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp +++ b/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp @@ -3349,7 +3349,7 @@ InstructionCost VPReplicateRecipe::computeCost(ElementCount VF, // Scale the cost by the probability of executing the predicated blocks. // This assumes the predicated block for each vector lane is equally // likely. 
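As a compact restatement of the cost-divisor behavior threaded through these hunks: a block that was predicated in the original loop contributes half its cost (the fixed 50% execution assumption), except when costing for code size, and blocks that only became predicated through tail folding are not discounted at all. A self-contained sketch with a worked example:

#include <cstdio>

enum CostKind { Throughput, CodeSize };

// Mirrors getPredBlockCostDivisor: 1 for blocks predicated only by tail
// folding or when optimizing for size, 2 otherwise (assumed 50% execution
// probability).
unsigned predBlockCostDivisor(CostKind Kind, bool OriginallyPredicated) {
  if (!OriginallyPredicated)
    return 1;
  return Kind == CodeSize ? 1 : 2;
}

int main() {
  unsigned BlockCost = 12;
  // If-block guarded in the scalar loop: charged 12 / 2 = 6 per iteration.
  std::printf("%u\n", BlockCost / predBlockCostDivisor(Throughput, true));
  // Same block predicated only by the tail-fold mask: charged in full, 12.
  std::printf("%u\n", BlockCost / predBlockCostDivisor(Throughput, false));
  // Code-size costing ignores execution probability: also 12.
  std::printf("%u\n", BlockCost / predBlockCostDivisor(CodeSize, true));
  return 0;
}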
- ScalarCost /= getPredBlockCostDivisor(Ctx.CostKind); + ScalarCost /= Ctx.getPredBlockCostDivisor(UI->getParent()); return ScalarCost; } case Instruction::Load: diff --git a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp index b319fbc7a78c0..f5bef08fafcdc 100644 --- a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp +++ b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp @@ -1420,10 +1420,26 @@ static void narrowToSingleScalarRecipes(VPlan &Plan) { // broadcasts. if (!vputils::isSingleScalar(RepOrWidenR) || !all_of(RepOrWidenR->users(), [RepOrWidenR](const VPUser *U) { - return U->usesScalars(RepOrWidenR) || - match(cast(U), - m_CombineOr(m_ExtractLastElement(m_VPValue()), - m_ExtractLastLanePerPart(m_VPValue()))); + if (auto *Store = dyn_cast(U)) { + // VPWidenStore doesn't have users, and stores are always + // profitable to widen: hence, permitting single-scalar stored + // values is an important leaf condition. The assert must hold as + // we checked the RepOrWidenR operand against + // vputils::isSingleScalar. + assert(RepOrWidenR == Store->getAddr() || + vputils::isSingleScalar(Store->getStoredValue())); + return true; + } + + if (auto *VPI = dyn_cast(U)) { + unsigned Opcode = VPI->getOpcode(); + if (Opcode == VPInstruction::ExtractLastElement || + Opcode == VPInstruction::ExtractLastLanePerPart || + Opcode == VPInstruction::ExtractPenultimateElement) + return true; + } + + return U->usesScalars(RepOrWidenR); })) continue; diff --git a/llvm/test/CodeGen/AArch64/aarch64-combine-add-sub-mul.ll b/llvm/test/CodeGen/AArch64/aarch64-combine-add-sub-mul.ll index e086ab92421fb..33ea74912251e 100644 --- a/llvm/test/CodeGen/AArch64/aarch64-combine-add-sub-mul.ll +++ b/llvm/test/CodeGen/AArch64/aarch64-combine-add-sub-mul.ll @@ -52,12 +52,11 @@ define <2 x i64> @test_mul_sub_2x64_2(<2 x i64> %a, <2 x i64> %b, <2 x i64> %c, ; CHECK-NEXT: ptrue p0.d, vl2 ; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 ; CHECK-NEXT: // kill: def $q1 killed $q1 def $z1 -; CHECK-NEXT: // kill: def $q3 killed $q3 def $z3 ; CHECK-NEXT: // kill: def $q2 killed $q2 def $z2 +; CHECK-NEXT: // kill: def $q3 killed $q3 def $z3 ; CHECK-NEXT: sdiv z0.d, p0/m, z0.d, z1.d -; CHECK-NEXT: movprfx z1, z2 -; CHECK-NEXT: mul z1.d, p0/m, z1.d, z3.d -; CHECK-NEXT: sub v0.2d, v1.2d, v0.2d +; CHECK-NEXT: mul z2.d, p0/m, z2.d, z3.d +; CHECK-NEXT: sub v0.2d, v2.2d, v0.2d ; CHECK-NEXT: ret %div = sdiv <2 x i64> %a, %b %mul = mul <2 x i64> %c, %d diff --git a/llvm/test/CodeGen/AArch64/complex-deinterleaving-add-mull-scalable-contract.ll b/llvm/test/CodeGen/AArch64/complex-deinterleaving-add-mull-scalable-contract.ll index 533e831de0df8..258eaabee9376 100644 --- a/llvm/test/CodeGen/AArch64/complex-deinterleaving-add-mull-scalable-contract.ll +++ b/llvm/test/CodeGen/AArch64/complex-deinterleaving-add-mull-scalable-contract.ll @@ -14,13 +14,12 @@ define @mull_add( %a, @mul_add_rot_mull( %a, , } @llvm.vector.deinterleave2.nxv4f64( %a) diff --git a/llvm/test/CodeGen/AArch64/complex-deinterleaving-add-mull-scalable-fast.ll b/llvm/test/CodeGen/AArch64/complex-deinterleaving-add-mull-scalable-fast.ll index 1eed9722f57be..b68c0094f84de 100644 --- a/llvm/test/CodeGen/AArch64/complex-deinterleaving-add-mull-scalable-fast.ll +++ b/llvm/test/CodeGen/AArch64/complex-deinterleaving-add-mull-scalable-fast.ll @@ -200,12 +200,10 @@ define @mul_add_rot_mull( %a, @complex_add_v4f16( %a, , } @llvm.vector.deinterleave2.nxv4f16( %a) diff --git 
a/llvm/test/CodeGen/AArch64/complex-deinterleaving-i16-mul-scalable.ll b/llvm/test/CodeGen/AArch64/complex-deinterleaving-i16-mul-scalable.ll index 061fd07489284..00b0095e4309c 100644 --- a/llvm/test/CodeGen/AArch64/complex-deinterleaving-i16-mul-scalable.ll +++ b/llvm/test/CodeGen/AArch64/complex-deinterleaving-i16-mul-scalable.ll @@ -18,11 +18,10 @@ define @complex_mul_v4i16( %a, @fshl_rot_illegal_i64( %a, @llvm.fshl.nxv4i64( %a, %a, %b) ret %fshl diff --git a/llvm/test/CodeGen/AArch64/llvm.sincospi.error.ll b/llvm/test/CodeGen/AArch64/llvm.sincospi.error.ll new file mode 100644 index 0000000000000..d074d9ae24641 --- /dev/null +++ b/llvm/test/CodeGen/AArch64/llvm.sincospi.error.ll @@ -0,0 +1,13 @@ +; RUN: not llc -mtriple=aarch64-gnu-linux -filetype=null %s 2>&1 | FileCheck %s + +; CHECK: error: no libcall available for fsincospi +define { float, float } @test_sincospi_f32(float %a) { + %result = call { float, float } @llvm.sincospi.f32(float %a) + ret { float, float } %result +} + +; CHECK: error: no libcall available for fsincospi +define { double, double } @test_sincospi_f64(double %a) { + %result = call { double, double } @llvm.sincospi.f64(double %a) + ret { double, double } %result +} diff --git a/llvm/test/CodeGen/AArch64/llvm.sincospi.ll b/llvm/test/CodeGen/AArch64/llvm.sincospi.ll index d1d7d92adc05a..b386df077c09d 100644 --- a/llvm/test/CodeGen/AArch64/llvm.sincospi.ll +++ b/llvm/test/CodeGen/AArch64/llvm.sincospi.ll @@ -1,268 +1,250 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2 -; RUN: llc -mtriple=aarch64-gnu-linux < %s | FileCheck -check-prefixes=CHECK %s +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6 +; RUN: llc -mtriple=arm64-apple-macosx10.9 < %s | FileCheck %s -define { half, half } @test_sincospi_f16(half %a) { +define { half, half } @test_sincospi_f16(half %a) #0 { ; CHECK-LABEL: test_sincospi_f16: -; CHECK: // %bb.0: -; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill -; CHECK-NEXT: .cfi_def_cfa_offset 16 -; CHECK-NEXT: .cfi_offset w30, -16 +; CHECK: ; %bb.0: +; CHECK-NEXT: sub sp, sp, #32 ; CHECK-NEXT: fcvt s0, h0 ; CHECK-NEXT: add x0, sp, #12 ; CHECK-NEXT: add x1, sp, #8 -; CHECK-NEXT: bl sincospif +; CHECK-NEXT: stp x29, x30, [sp, #16] ; 16-byte Folded Spill +; CHECK-NEXT: bl ___sincospif ; CHECK-NEXT: ldp s1, s0, [sp, #8] +; CHECK-NEXT: ldp x29, x30, [sp, #16] ; 16-byte Folded Reload ; CHECK-NEXT: fcvt h0, s0 ; CHECK-NEXT: fcvt h1, s1 -; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload +; CHECK-NEXT: add sp, sp, #32 ; CHECK-NEXT: ret %result = call { half, half } @llvm.sincospi.f16(half %a) ret { half, half } %result } -define half @test_sincospi_f16_only_use_sin(half %a) { +define half @test_sincospi_f16_only_use_sin(half %a) #0 { ; CHECK-LABEL: test_sincospi_f16_only_use_sin: -; CHECK: // %bb.0: -; CHECK-NEXT: str x30, [sp, #-16]! 
// 8-byte Folded Spill -; CHECK-NEXT: .cfi_def_cfa_offset 16 -; CHECK-NEXT: .cfi_offset w30, -16 +; CHECK: ; %bb.0: +; CHECK-NEXT: sub sp, sp, #32 ; CHECK-NEXT: fcvt s0, h0 ; CHECK-NEXT: add x0, sp, #12 ; CHECK-NEXT: add x1, sp, #8 -; CHECK-NEXT: bl sincospif +; CHECK-NEXT: stp x29, x30, [sp, #16] ; 16-byte Folded Spill +; CHECK-NEXT: bl ___sincospif ; CHECK-NEXT: ldr s0, [sp, #12] +; CHECK-NEXT: ldp x29, x30, [sp, #16] ; 16-byte Folded Reload ; CHECK-NEXT: fcvt h0, s0 -; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload +; CHECK-NEXT: add sp, sp, #32 ; CHECK-NEXT: ret %result = call { half, half } @llvm.sincospi.f16(half %a) %result.0 = extractvalue { half, half } %result, 0 ret half %result.0 } -define half @test_sincospi_f16_only_use_cos(half %a) { +define half @test_sincospi_f16_only_use_cos(half %a) #0 { ; CHECK-LABEL: test_sincospi_f16_only_use_cos: -; CHECK: // %bb.0: -; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill -; CHECK-NEXT: .cfi_def_cfa_offset 16 -; CHECK-NEXT: .cfi_offset w30, -16 +; CHECK: ; %bb.0: +; CHECK-NEXT: sub sp, sp, #32 ; CHECK-NEXT: fcvt s0, h0 ; CHECK-NEXT: add x0, sp, #12 ; CHECK-NEXT: add x1, sp, #8 -; CHECK-NEXT: bl sincospif +; CHECK-NEXT: stp x29, x30, [sp, #16] ; 16-byte Folded Spill +; CHECK-NEXT: bl ___sincospif ; CHECK-NEXT: ldr s0, [sp, #8] +; CHECK-NEXT: ldp x29, x30, [sp, #16] ; 16-byte Folded Reload ; CHECK-NEXT: fcvt h0, s0 -; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload +; CHECK-NEXT: add sp, sp, #32 ; CHECK-NEXT: ret %result = call { half, half } @llvm.sincospi.f16(half %a) %result.1 = extractvalue { half, half } %result, 1 ret half %result.1 } -define { <2 x half>, <2 x half> } @test_sincospi_v2f16(<2 x half> %a) { +define { <2 x half>, <2 x half> } @test_sincospi_v2f16(<2 x half> %a) #0 { ; CHECK-LABEL: test_sincospi_v2f16: -; CHECK: // %bb.0: +; CHECK: ; %bb.0: ; CHECK-NEXT: sub sp, sp, #64 -; CHECK-NEXT: str x30, [sp, #48] // 8-byte Folded Spill -; CHECK-NEXT: .cfi_def_cfa_offset 64 -; CHECK-NEXT: .cfi_offset w30, -16 -; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0 -; CHECK-NEXT: mov h1, v0.h[1] -; CHECK-NEXT: str q0, [sp] // 16-byte Folded Spill -; CHECK-NEXT: add x0, sp, #36 -; CHECK-NEXT: add x1, sp, #32 -; CHECK-NEXT: fcvt s0, h1 -; CHECK-NEXT: bl sincospif -; CHECK-NEXT: ldr q0, [sp] // 16-byte Folded Reload +; CHECK-NEXT: ; kill: def $d0 killed $d0 def $q0 +; CHECK-NEXT: mov h1, v0[1] +; CHECK-NEXT: str q0, [sp] ; 16-byte Folded Spill ; CHECK-NEXT: add x0, sp, #28 ; CHECK-NEXT: add x1, sp, #24 +; CHECK-NEXT: stp x29, x30, [sp, #48] ; 16-byte Folded Spill +; CHECK-NEXT: fcvt s0, h1 +; CHECK-NEXT: bl ___sincospif +; CHECK-NEXT: ldr q0, [sp] ; 16-byte Folded Reload +; CHECK-NEXT: add x0, sp, #20 +; CHECK-NEXT: add x1, sp, #16 ; CHECK-NEXT: fcvt s0, h0 -; CHECK-NEXT: bl sincospif -; CHECK-NEXT: ldr q0, [sp] // 16-byte Folded Reload +; CHECK-NEXT: bl ___sincospif +; CHECK-NEXT: ldr q0, [sp] ; 16-byte Folded Reload +; CHECK-NEXT: add x0, sp, #36 +; CHECK-NEXT: add x1, sp, #32 +; CHECK-NEXT: mov h0, v0[2] +; CHECK-NEXT: fcvt s0, h0 +; CHECK-NEXT: bl ___sincospif +; CHECK-NEXT: ldr q0, [sp] ; 16-byte Folded Reload ; CHECK-NEXT: add x0, sp, #44 ; CHECK-NEXT: add x1, sp, #40 -; CHECK-NEXT: mov h0, v0.h[2] -; CHECK-NEXT: fcvt s0, h0 -; CHECK-NEXT: bl sincospif -; CHECK-NEXT: ldr q0, [sp] // 16-byte Folded Reload -; CHECK-NEXT: add x0, sp, #60 -; CHECK-NEXT: add x1, sp, #56 -; CHECK-NEXT: mov h0, v0.h[3] +; CHECK-NEXT: mov h0, v0[3] ; CHECK-NEXT: fcvt s0, h0 -; CHECK-NEXT: bl sincospif -; CHECK-NEXT: ldp s2, s0, [sp, #32] 
-; CHECK-NEXT: ldr x30, [sp, #48] // 8-byte Folded Reload -; CHECK-NEXT: ldp s3, s1, [sp, #24] +; CHECK-NEXT: bl ___sincospif +; CHECK-NEXT: ldp s2, s0, [sp, #24] +; CHECK-NEXT: ldp s3, s1, [sp, #16] +; CHECK-NEXT: ldp x29, x30, [sp, #48] ; 16-byte Folded Reload ; CHECK-NEXT: fcvt h4, s0 ; CHECK-NEXT: fcvt h2, s2 ; CHECK-NEXT: fcvt h0, s1 ; CHECK-NEXT: fcvt h1, s3 -; CHECK-NEXT: ldp s5, s3, [sp, #40] +; CHECK-NEXT: ldp s5, s3, [sp, #32] ; CHECK-NEXT: fcvt h3, s3 -; CHECK-NEXT: mov v0.h[1], v4.h[0] +; CHECK-NEXT: mov.h v0[1], v4[0] ; CHECK-NEXT: fcvt h4, s5 -; CHECK-NEXT: mov v1.h[1], v2.h[0] -; CHECK-NEXT: ldp s5, s2, [sp, #56] -; CHECK-NEXT: mov v0.h[2], v3.h[0] +; CHECK-NEXT: mov.h v1[1], v2[0] +; CHECK-NEXT: ldp s5, s2, [sp, #40] +; CHECK-NEXT: mov.h v0[2], v3[0] ; CHECK-NEXT: fcvt h2, s2 ; CHECK-NEXT: fcvt h3, s5 -; CHECK-NEXT: mov v1.h[2], v4.h[0] -; CHECK-NEXT: mov v0.h[3], v2.h[0] -; CHECK-NEXT: mov v1.h[3], v3.h[0] -; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0 -; CHECK-NEXT: // kill: def $d1 killed $d1 killed $q1 +; CHECK-NEXT: mov.h v1[2], v4[0] +; CHECK-NEXT: mov.h v0[3], v2[0] +; CHECK-NEXT: mov.h v1[3], v3[0] +; CHECK-NEXT: ; kill: def $d0 killed $d0 killed $q0 +; CHECK-NEXT: ; kill: def $d1 killed $d1 killed $q1 ; CHECK-NEXT: add sp, sp, #64 ; CHECK-NEXT: ret %result = call { <2 x half>, <2 x half> } @llvm.sincospi.v2f16(<2 x half> %a) ret { <2 x half>, <2 x half> } %result } -define { float, float } @test_sincospi_f32(float %a) { +define { float, float } @test_sincospi_f32(float %a) #0 { ; CHECK-LABEL: test_sincospi_f32: -; CHECK: // %bb.0: -; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill -; CHECK-NEXT: .cfi_def_cfa_offset 16 -; CHECK-NEXT: .cfi_offset w30, -16 +; CHECK: ; %bb.0: +; CHECK-NEXT: sub sp, sp, #32 ; CHECK-NEXT: add x0, sp, #12 ; CHECK-NEXT: add x1, sp, #8 -; CHECK-NEXT: bl sincospif +; CHECK-NEXT: stp x29, x30, [sp, #16] ; 16-byte Folded Spill +; CHECK-NEXT: bl ___sincospif ; CHECK-NEXT: ldp s1, s0, [sp, #8] -; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload +; CHECK-NEXT: ldp x29, x30, [sp, #16] ; 16-byte Folded Reload +; CHECK-NEXT: add sp, sp, #32 ; CHECK-NEXT: ret %result = call { float, float } @llvm.sincospi.f32(float %a) ret { float, float } %result } -define { <3 x float>, <3 x float> } @test_sincospi_v3f32(<3 x float> %a) { +define { <3 x float>, <3 x float> } @test_sincospi_v3f32(<3 x float> %a) #0 { ; CHECK-LABEL: test_sincospi_v3f32: -; CHECK: // %bb.0: -; CHECK-NEXT: sub sp, sp, #80 -; CHECK-NEXT: str x30, [sp, #32] // 8-byte Folded Spill -; CHECK-NEXT: stp x22, x21, [sp, #48] // 16-byte Folded Spill -; CHECK-NEXT: stp x20, x19, [sp, #64] // 16-byte Folded Spill -; CHECK-NEXT: .cfi_def_cfa_offset 80 -; CHECK-NEXT: .cfi_offset w19, -8 -; CHECK-NEXT: .cfi_offset w20, -16 -; CHECK-NEXT: .cfi_offset w21, -24 -; CHECK-NEXT: .cfi_offset w22, -32 -; CHECK-NEXT: .cfi_offset w30, -48 -; CHECK-NEXT: add x0, sp, #20 -; CHECK-NEXT: add x1, sp, #16 -; CHECK-NEXT: str q0, [sp] // 16-byte Folded Spill -; CHECK-NEXT: // kill: def $s0 killed $s0 killed $q0 -; CHECK-NEXT: bl sincospif -; CHECK-NEXT: ldr q0, [sp] // 16-byte Folded Reload +; CHECK: ; %bb.0: +; CHECK-NEXT: sub sp, sp, #96 ; CHECK-NEXT: add x0, sp, #28 ; CHECK-NEXT: add x1, sp, #24 -; CHECK-NEXT: add x19, sp, #28 -; CHECK-NEXT: add x20, sp, #24 -; CHECK-NEXT: mov s0, v0.s[1] -; CHECK-NEXT: bl sincospif -; CHECK-NEXT: ldr q0, [sp] // 16-byte Folded Reload +; CHECK-NEXT: stp x22, x21, [sp, #48] ; 16-byte Folded Spill +; CHECK-NEXT: stp x20, x19, [sp, #64] ; 16-byte Folded Spill +; 
CHECK-NEXT: stp x29, x30, [sp, #80] ; 16-byte Folded Spill +; CHECK-NEXT: str q0, [sp] ; 16-byte Folded Spill +; CHECK-NEXT: ; kill: def $s0 killed $s0 killed $q0 +; CHECK-NEXT: bl ___sincospif +; CHECK-NEXT: ldr q0, [sp] ; 16-byte Folded Reload +; CHECK-NEXT: add x0, sp, #36 +; CHECK-NEXT: add x1, sp, #32 +; CHECK-NEXT: add x19, sp, #36 +; CHECK-NEXT: add x20, sp, #32 +; CHECK-NEXT: mov s0, v0[1] +; CHECK-NEXT: bl ___sincospif +; CHECK-NEXT: ldr q0, [sp] ; 16-byte Folded Reload ; CHECK-NEXT: add x0, sp, #44 ; CHECK-NEXT: add x1, sp, #40 ; CHECK-NEXT: add x21, sp, #44 ; CHECK-NEXT: add x22, sp, #40 -; CHECK-NEXT: mov s0, v0.s[2] -; CHECK-NEXT: bl sincospif -; CHECK-NEXT: ldp s1, s0, [sp, #16] -; CHECK-NEXT: ldr x30, [sp, #32] // 8-byte Folded Reload -; CHECK-NEXT: ld1 { v0.s }[1], [x19] -; CHECK-NEXT: ld1 { v1.s }[1], [x20] -; CHECK-NEXT: ldp x20, x19, [sp, #64] // 16-byte Folded Reload -; CHECK-NEXT: ld1 { v0.s }[2], [x21] -; CHECK-NEXT: ld1 { v1.s }[2], [x22] -; CHECK-NEXT: ldp x22, x21, [sp, #48] // 16-byte Folded Reload -; CHECK-NEXT: add sp, sp, #80 +; CHECK-NEXT: mov s0, v0[2] +; CHECK-NEXT: bl ___sincospif +; CHECK-NEXT: ldp s1, s0, [sp, #24] +; CHECK-NEXT: ldp x29, x30, [sp, #80] ; 16-byte Folded Reload +; CHECK-NEXT: ld1.s { v0 }[1], [x19] +; CHECK-NEXT: ld1.s { v1 }[1], [x20] +; CHECK-NEXT: ldp x20, x19, [sp, #64] ; 16-byte Folded Reload +; CHECK-NEXT: ld1.s { v0 }[2], [x21] +; CHECK-NEXT: ld1.s { v1 }[2], [x22] +; CHECK-NEXT: ldp x22, x21, [sp, #48] ; 16-byte Folded Reload +; CHECK-NEXT: add sp, sp, #96 ; CHECK-NEXT: ret %result = call { <3 x float>, <3 x float> } @llvm.sincospi.v3f32(<3 x float> %a) ret { <3 x float>, <3 x float> } %result } -define { <2 x float>, <2 x float> } @test_sincospi_v2f32(<2 x float> %a) { +define { <2 x float>, <2 x float> } @test_sincospi_v2f32(<2 x float> %a) #0 { ; CHECK-LABEL: test_sincospi_v2f32: -; CHECK: // %bb.0: +; CHECK: ; %bb.0: ; CHECK-NEXT: sub sp, sp, #64 -; CHECK-NEXT: str x30, [sp, #32] // 8-byte Folded Spill -; CHECK-NEXT: stp x20, x19, [sp, #48] // 16-byte Folded Spill -; CHECK-NEXT: .cfi_def_cfa_offset 64 -; CHECK-NEXT: .cfi_offset w19, -8 -; CHECK-NEXT: .cfi_offset w20, -16 -; CHECK-NEXT: .cfi_offset w30, -32 -; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0 -; CHECK-NEXT: add x0, sp, #44 -; CHECK-NEXT: add x1, sp, #40 -; CHECK-NEXT: str q0, [sp] // 16-byte Folded Spill -; CHECK-NEXT: // kill: def $s0 killed $s0 killed $q0 -; CHECK-NEXT: bl sincospif -; CHECK-NEXT: ldr q0, [sp] // 16-byte Folded Reload +; CHECK-NEXT: ; kill: def $d0 killed $d0 def $q0 ; CHECK-NEXT: add x0, sp, #28 ; CHECK-NEXT: add x1, sp, #24 -; CHECK-NEXT: add x19, sp, #28 -; CHECK-NEXT: add x20, sp, #24 -; CHECK-NEXT: mov s0, v0.s[1] -; CHECK-NEXT: bl sincospif -; CHECK-NEXT: ldp s1, s0, [sp, #40] -; CHECK-NEXT: ldr x30, [sp, #32] // 8-byte Folded Reload -; CHECK-NEXT: ld1 { v0.s }[1], [x19] -; CHECK-NEXT: ld1 { v1.s }[1], [x20] -; CHECK-NEXT: ldp x20, x19, [sp, #48] // 16-byte Folded Reload -; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0 -; CHECK-NEXT: // kill: def $d1 killed $d1 killed $q1 +; CHECK-NEXT: stp x20, x19, [sp, #32] ; 16-byte Folded Spill +; CHECK-NEXT: stp x29, x30, [sp, #48] ; 16-byte Folded Spill +; CHECK-NEXT: str q0, [sp] ; 16-byte Folded Spill +; CHECK-NEXT: ; kill: def $s0 killed $s0 killed $q0 +; CHECK-NEXT: bl ___sincospif +; CHECK-NEXT: ldr q0, [sp] ; 16-byte Folded Reload +; CHECK-NEXT: add x0, sp, #20 +; CHECK-NEXT: add x1, sp, #16 +; CHECK-NEXT: add x19, sp, #20 +; CHECK-NEXT: add x20, sp, #16 +; CHECK-NEXT: mov s0, v0[1] +; 
CHECK-NEXT: bl ___sincospif +; CHECK-NEXT: ldp s1, s0, [sp, #24] +; CHECK-NEXT: ldp x29, x30, [sp, #48] ; 16-byte Folded Reload +; CHECK-NEXT: ld1.s { v0 }[1], [x19] +; CHECK-NEXT: ld1.s { v1 }[1], [x20] +; CHECK-NEXT: ldp x20, x19, [sp, #32] ; 16-byte Folded Reload +; CHECK-NEXT: ; kill: def $d0 killed $d0 killed $q0 +; CHECK-NEXT: ; kill: def $d1 killed $d1 killed $q1 ; CHECK-NEXT: add sp, sp, #64 ; CHECK-NEXT: ret %result = call { <2 x float>, <2 x float> } @llvm.sincospi.v2f32(<2 x float> %a) ret { <2 x float>, <2 x float> } %result } -define { double, double } @test_sincospi_f64(double %a) { +define { double, double } @test_sincospi_f64(double %a) #0 { ; CHECK-LABEL: test_sincospi_f64: -; CHECK: // %bb.0: +; CHECK: ; %bb.0: ; CHECK-NEXT: sub sp, sp, #32 -; CHECK-NEXT: str x30, [sp, #16] // 8-byte Folded Spill -; CHECK-NEXT: .cfi_def_cfa_offset 32 -; CHECK-NEXT: .cfi_offset w30, -16 -; CHECK-NEXT: add x0, sp, #24 -; CHECK-NEXT: add x1, sp, #8 -; CHECK-NEXT: bl sincospi -; CHECK-NEXT: ldr d0, [sp, #24] -; CHECK-NEXT: ldr d1, [sp, #8] -; CHECK-NEXT: ldr x30, [sp, #16] // 8-byte Folded Reload +; CHECK-NEXT: add x0, sp, #8 +; CHECK-NEXT: mov x1, sp +; CHECK-NEXT: stp x29, x30, [sp, #16] ; 16-byte Folded Spill +; CHECK-NEXT: bl ___sincospi +; CHECK-NEXT: ldp d1, d0, [sp] +; CHECK-NEXT: ldp x29, x30, [sp, #16] ; 16-byte Folded Reload ; CHECK-NEXT: add sp, sp, #32 ; CHECK-NEXT: ret %result = call { double, double } @llvm.sincospi.f64(double %a) ret { double, double } %result } -define { <2 x double>, <2 x double> } @test_sincospi_v2f64(<2 x double> %a) { +define { <2 x double>, <2 x double> } @test_sincospi_v2f64(<2 x double> %a) #0 { ; CHECK-LABEL: test_sincospi_v2f64: -; CHECK: // %bb.0: +; CHECK: ; %bb.0: ; CHECK-NEXT: sub sp, sp, #80 -; CHECK-NEXT: str x30, [sp, #48] // 8-byte Folded Spill -; CHECK-NEXT: stp x20, x19, [sp, #64] // 16-byte Folded Spill -; CHECK-NEXT: .cfi_def_cfa_offset 80 -; CHECK-NEXT: .cfi_offset w19, -8 -; CHECK-NEXT: .cfi_offset w20, -16 -; CHECK-NEXT: .cfi_offset w30, -32 -; CHECK-NEXT: add x0, sp, #56 -; CHECK-NEXT: add x1, sp, #40 -; CHECK-NEXT: str q0, [sp] // 16-byte Folded Spill -; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0 -; CHECK-NEXT: bl sincospi -; CHECK-NEXT: ldr q0, [sp] // 16-byte Folded Reload -; CHECK-NEXT: add x0, sp, #32 -; CHECK-NEXT: add x1, sp, #24 -; CHECK-NEXT: add x19, sp, #32 -; CHECK-NEXT: add x20, sp, #24 -; CHECK-NEXT: mov d0, v0.d[1] -; CHECK-NEXT: bl sincospi -; CHECK-NEXT: ldr d0, [sp, #56] -; CHECK-NEXT: ldr d1, [sp, #40] -; CHECK-NEXT: ldr x30, [sp, #48] // 8-byte Folded Reload -; CHECK-NEXT: ld1 { v0.d }[1], [x19] -; CHECK-NEXT: ld1 { v1.d }[1], [x20] -; CHECK-NEXT: ldp x20, x19, [sp, #64] // 16-byte Folded Reload +; CHECK-NEXT: add x0, sp, #40 +; CHECK-NEXT: add x1, sp, #32 +; CHECK-NEXT: stp x20, x19, [sp, #48] ; 16-byte Folded Spill +; CHECK-NEXT: stp x29, x30, [sp, #64] ; 16-byte Folded Spill +; CHECK-NEXT: str q0, [sp] ; 16-byte Folded Spill +; CHECK-NEXT: ; kill: def $d0 killed $d0 killed $q0 +; CHECK-NEXT: bl ___sincospi +; CHECK-NEXT: ldr q0, [sp] ; 16-byte Folded Reload +; CHECK-NEXT: add x0, sp, #24 +; CHECK-NEXT: add x1, sp, #16 +; CHECK-NEXT: add x19, sp, #24 +; CHECK-NEXT: add x20, sp, #16 +; CHECK-NEXT: mov d0, v0[1] +; CHECK-NEXT: bl ___sincospi +; CHECK-NEXT: ldp d1, d0, [sp, #32] +; CHECK-NEXT: ldp x29, x30, [sp, #64] ; 16-byte Folded Reload +; CHECK-NEXT: ld1.d { v0 }[1], [x19] +; CHECK-NEXT: ld1.d { v1 }[1], [x20] +; CHECK-NEXT: ldp x20, x19, [sp, #48] ; 16-byte Folded Reload ; CHECK-NEXT: add sp, sp, #80 ; 
CHECK-NEXT: ret %result = call { <2 x double>, <2 x double> } @llvm.sincospi.v2f64(<2 x double> %a) ret { <2 x double>, <2 x double> } %result } + +attributes #0 = { nounwind } diff --git a/llvm/test/CodeGen/AArch64/machine-outliner-adrp-got-split.mir b/llvm/test/CodeGen/AArch64/machine-outliner-adrp-got-split.mir new file mode 100644 index 0000000000000..c397953b68f5e --- /dev/null +++ b/llvm/test/CodeGen/AArch64/machine-outliner-adrp-got-split.mir @@ -0,0 +1,133 @@ +# RUN: llc -mtriple=aarch64--- -run-pass=machine-outliner -verify-machineinstrs %s -o - | FileCheck %s +--- | + + @x = common global i32 0, align 4 + + define i32 @adrp_add() #0 { + ret i32 0 + } + + define i32 @adrp_ldr() #0 { + ret i32 0 + } + + attributes #0 = { noinline noredzone } +... +--- +# Check that main function body doesn't split ADRP pair +# +# CHECK-LABEL: name: adrp_add +# CHECK-DAG: bb.0: +# CHECK: BL @OUTLINED_FUNCTION_[[F0:[0-9]+]] +# CHECK-NEXT: BL @OUTLINED_FUNCTION_[[F2:[0-9]+]] +# CHECK-NEXT: $lr = ORRXri $xzr, 1 +name: adrp_add +tracksRegLiveness: true +body: | + bb.0: + liveins: $lr + $w12 = ORRWri $wzr, 1 + $w12 = ORRWri $wzr, 1 + $w12 = ORRWri $wzr, 1 + $w12 = ORRWri $wzr, 1 + $w12 = ORRWri $wzr, 1 + $w12 = ORRWri $wzr, 1 + $x9 = ADRP target-flags(aarch64-page, aarch64-got) @x + $x12 = ADDXri $x9, target-flags(aarch64-pageoff, aarch64-got) @x, 0 + $lr = ORRXri $xzr, 1 + bb.1: + liveins: $lr + $w12 = ORRWri $wzr, 1 + $w12 = ORRWri $wzr, 1 + $w12 = ORRWri $wzr, 1 + $w12 = ORRWri $wzr, 1 + $w12 = ORRWri $wzr, 1 + $w12 = ORRWri $wzr, 1 + $x9 = ADRP target-flags(aarch64-page, aarch64-got) @x + $x12 = ADDXri $x9, target-flags(aarch64-pageoff, aarch64-got) @x, 0 + $lr = ORRXri $xzr, 1 + bb.2: + liveins: $lr + $w12 = ORRWri $wzr, 1 + $w12 = ORRWri $wzr, 1 + $w12 = ORRWri $wzr, 1 + $w12 = ORRWri $wzr, 1 + $w12 = ORRWri $wzr, 1 + $w12 = ORRWri $wzr, 1 + $x9 = ADRP target-flags(aarch64-page, aarch64-got) @x + $x12 = ADDXri $x9, target-flags(aarch64-pageoff, aarch64-got) @x, 0 + $lr = ORRXri $xzr, 1 + bb.3: + liveins: $lr + RET undef $lr +... 
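# For context, the pattern these tests protect is the standard AArch64 GOT
# access idiom, sketched below as illustrative GNU-syntax assembly (the
# register choice is arbitrary, not taken from this test):
#
#   adrp x9, :got:x             // page address of x's GOT entry
#   ldr  x12, [x9, :got_lo12:x] // load the entry within that page
#
# The ADRP and its paired ADD/LDR carry matching relocations, so the
# outliner must never place a function boundary between them.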
+--- +# Check that main function body doesn't split ADRP pair +# +# CHECK-LABEL: name: adrp_ldr +# CHECK-DAG: bb.0: +# CHECK: BL @OUTLINED_FUNCTION_[[F0]] +# CHECK-NEXT: BL @OUTLINED_FUNCTION_[[F1:[0-9]+]] +# CHECK-NEXT: $lr = ORRXri $xzr, 1 +name: adrp_ldr +tracksRegLiveness: true +body: | + bb.0: + liveins: $lr + $w12 = ORRWri $wzr, 1 + $w12 = ORRWri $wzr, 1 + $w12 = ORRWri $wzr, 1 + $w12 = ORRWri $wzr, 1 + $w12 = ORRWri $wzr, 1 + $w12 = ORRWri $wzr, 1 + $x9 = ADRP target-flags(aarch64-page, aarch64-got) @x + $x12 = LDRXui $x9, target-flags(aarch64-pageoff, aarch64-got) @x + $lr = ORRXri $xzr, 1 + bb.1: + liveins: $lr + $w12 = ORRWri $wzr, 1 + $w12 = ORRWri $wzr, 1 + $w12 = ORRWri $wzr, 1 + $w12 = ORRWri $wzr, 1 + $w12 = ORRWri $wzr, 1 + $w12 = ORRWri $wzr, 1 + $x9 = ADRP target-flags(aarch64-page, aarch64-got) @x + $x12 = LDRXui $x9, target-flags(aarch64-pageoff, aarch64-got) @x + $lr = ORRXri $xzr, 1 + bb.2: + liveins: $lr + $w12 = ORRWri $wzr, 1 + $w12 = ORRWri $wzr, 1 + $w12 = ORRWri $wzr, 1 + $w12 = ORRWri $wzr, 1 + $w12 = ORRWri $wzr, 1 + $w12 = ORRWri $wzr, 1 + $x9 = ADRP target-flags(aarch64-page, aarch64-got) @x + $x12 = LDRXui $x9, target-flags(aarch64-pageoff, aarch64-got) @x + $lr = ORRXri $xzr, 1 + bb.3: + liveins: $lr + RET undef $lr + +# Check that no outlined function split the ADRP pair apart +# +# CHECK: OUTLINED_FUNCTION_[[F0]] +# CHECK-DAG: bb.0 +# CHECK: $w12 = ORRWri $wzr, 1 +# CHECK-NEXT: $w12 = ORRWri $wzr, 1 +# CHECK-NEXT: $w12 = ORRWri $wzr, 1 +# CHECK-NEXT: $w12 = ORRWri $wzr, 1 +# CHECK-NEXT: $w12 = ORRWri $wzr, 1 +# CHECK-NEXT: RET $lr + +# CHECK: OUTLINED_FUNCTION_[[F1]] +# CHECK-DAG: bb.0 +# CHECK: $w12 = ORRWri $wzr, 1 +# CHECK-NEXT: $x9 = ADRP target-flags(aarch64-page, aarch64-got) @x +# CHECK-NEXT: $x12 = LDRXui $x9, target-flags(aarch64-pageoff, aarch64-got) @x + +# CHECK: name: OUTLINED_FUNCTION_[[F2]] +# CHECK-DAG: bb.0 +# CHECK: $w12 = ORRWri $wzr, 1 +# CHECK-NEXT: $x9 = ADRP target-flags(aarch64-page, aarch64-got) @x +# CHECK-NEXT: $x12 = ADDXri $x9, target-flags(aarch64-pageoff, aarch64-got) @x, 0 diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-fp-arith.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-fp-arith.ll index 6fbae7edfec0a..2dda03e5c6dab 100644 --- a/llvm/test/CodeGen/AArch64/sve-fixed-length-fp-arith.ll +++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-fp-arith.ll @@ -55,10 +55,9 @@ define void @fadd_v32f16(ptr %a, ptr %b) #0 { ; VBITS_GE_256-NEXT: ld1h { z2.h }, p0/z, [x0] ; VBITS_GE_256-NEXT: ld1h { z3.h }, p0/z, [x1] ; VBITS_GE_256-NEXT: fadd z0.h, p0/m, z0.h, z1.h -; VBITS_GE_256-NEXT: movprfx z1, z2 -; VBITS_GE_256-NEXT: fadd z1.h, p0/m, z1.h, z3.h +; VBITS_GE_256-NEXT: fadd z2.h, p0/m, z2.h, z3.h ; VBITS_GE_256-NEXT: st1h { z0.h }, p0, [x0, x8, lsl #1] -; VBITS_GE_256-NEXT: st1h { z1.h }, p0, [x0] +; VBITS_GE_256-NEXT: st1h { z2.h }, p0, [x0] ; VBITS_GE_256-NEXT: ret ; ; VBITS_GE_512-LABEL: fadd_v32f16: @@ -154,10 +153,9 @@ define void @fadd_v16f32(ptr %a, ptr %b) #0 { ; VBITS_GE_256-NEXT: ld1w { z2.s }, p0/z, [x0] ; VBITS_GE_256-NEXT: ld1w { z3.s }, p0/z, [x1] ; VBITS_GE_256-NEXT: fadd z0.s, p0/m, z0.s, z1.s -; VBITS_GE_256-NEXT: movprfx z1, z2 -; VBITS_GE_256-NEXT: fadd z1.s, p0/m, z1.s, z3.s +; VBITS_GE_256-NEXT: fadd z2.s, p0/m, z2.s, z3.s ; VBITS_GE_256-NEXT: st1w { z0.s }, p0, [x0, x8, lsl #2] -; VBITS_GE_256-NEXT: st1w { z1.s }, p0, [x0] +; VBITS_GE_256-NEXT: st1w { z2.s }, p0, [x0] ; VBITS_GE_256-NEXT: ret ; ; VBITS_GE_512-LABEL: fadd_v16f32: @@ -253,10 +251,9 @@ define void @fadd_v8f64(ptr %a, ptr %b) #0 { ; 
VBITS_GE_256-NEXT: ld1d { z2.d }, p0/z, [x0] ; VBITS_GE_256-NEXT: ld1d { z3.d }, p0/z, [x1] ; VBITS_GE_256-NEXT: fadd z0.d, p0/m, z0.d, z1.d -; VBITS_GE_256-NEXT: movprfx z1, z2 -; VBITS_GE_256-NEXT: fadd z1.d, p0/m, z1.d, z3.d +; VBITS_GE_256-NEXT: fadd z2.d, p0/m, z2.d, z3.d ; VBITS_GE_256-NEXT: st1d { z0.d }, p0, [x0, x8, lsl #3] -; VBITS_GE_256-NEXT: st1d { z1.d }, p0, [x0] +; VBITS_GE_256-NEXT: st1d { z2.d }, p0, [x0] ; VBITS_GE_256-NEXT: ret ; ; VBITS_GE_512-LABEL: fadd_v8f64: @@ -660,10 +657,9 @@ define void @fma_v32f16(ptr %a, ptr %b, ptr %c) #0 { ; VBITS_GE_256-NEXT: ld1h { z4.h }, p0/z, [x1] ; VBITS_GE_256-NEXT: ld1h { z5.h }, p0/z, [x2] ; VBITS_GE_256-NEXT: fmad z0.h, p0/m, z1.h, z2.h -; VBITS_GE_256-NEXT: movprfx z1, z5 -; VBITS_GE_256-NEXT: fmla z1.h, p0/m, z3.h, z4.h +; VBITS_GE_256-NEXT: fmad z3.h, p0/m, z4.h, z5.h ; VBITS_GE_256-NEXT: st1h { z0.h }, p0, [x0, x8, lsl #1] -; VBITS_GE_256-NEXT: st1h { z1.h }, p0, [x0] +; VBITS_GE_256-NEXT: st1h { z3.h }, p0, [x0] ; VBITS_GE_256-NEXT: ret ; ; VBITS_GE_512-LABEL: fma_v32f16: @@ -771,10 +767,9 @@ define void @fma_v16f32(ptr %a, ptr %b, ptr %c) #0 { ; VBITS_GE_256-NEXT: ld1w { z4.s }, p0/z, [x1] ; VBITS_GE_256-NEXT: ld1w { z5.s }, p0/z, [x2] ; VBITS_GE_256-NEXT: fmad z0.s, p0/m, z1.s, z2.s -; VBITS_GE_256-NEXT: movprfx z1, z5 -; VBITS_GE_256-NEXT: fmla z1.s, p0/m, z3.s, z4.s +; VBITS_GE_256-NEXT: fmad z3.s, p0/m, z4.s, z5.s ; VBITS_GE_256-NEXT: st1w { z0.s }, p0, [x0, x8, lsl #2] -; VBITS_GE_256-NEXT: st1w { z1.s }, p0, [x0] +; VBITS_GE_256-NEXT: st1w { z3.s }, p0, [x0] ; VBITS_GE_256-NEXT: ret ; ; VBITS_GE_512-LABEL: fma_v16f32: @@ -881,10 +876,9 @@ define void @fma_v8f64(ptr %a, ptr %b, ptr %c) #0 { ; VBITS_GE_256-NEXT: ld1d { z4.d }, p0/z, [x1] ; VBITS_GE_256-NEXT: ld1d { z5.d }, p0/z, [x2] ; VBITS_GE_256-NEXT: fmad z0.d, p0/m, z1.d, z2.d -; VBITS_GE_256-NEXT: movprfx z1, z5 -; VBITS_GE_256-NEXT: fmla z1.d, p0/m, z3.d, z4.d +; VBITS_GE_256-NEXT: fmad z3.d, p0/m, z4.d, z5.d ; VBITS_GE_256-NEXT: st1d { z0.d }, p0, [x0, x8, lsl #3] -; VBITS_GE_256-NEXT: st1d { z1.d }, p0, [x0] +; VBITS_GE_256-NEXT: st1d { z3.d }, p0, [x0] ; VBITS_GE_256-NEXT: ret ; ; VBITS_GE_512-LABEL: fma_v8f64: @@ -990,10 +984,9 @@ define void @fmul_v32f16(ptr %a, ptr %b) #0 { ; VBITS_GE_256-NEXT: ld1h { z2.h }, p0/z, [x0] ; VBITS_GE_256-NEXT: ld1h { z3.h }, p0/z, [x1] ; VBITS_GE_256-NEXT: fmul z0.h, p0/m, z0.h, z1.h -; VBITS_GE_256-NEXT: movprfx z1, z2 -; VBITS_GE_256-NEXT: fmul z1.h, p0/m, z1.h, z3.h +; VBITS_GE_256-NEXT: fmul z2.h, p0/m, z2.h, z3.h ; VBITS_GE_256-NEXT: st1h { z0.h }, p0, [x0, x8, lsl #1] -; VBITS_GE_256-NEXT: st1h { z1.h }, p0, [x0] +; VBITS_GE_256-NEXT: st1h { z2.h }, p0, [x0] ; VBITS_GE_256-NEXT: ret ; ; VBITS_GE_512-LABEL: fmul_v32f16: @@ -1089,10 +1082,9 @@ define void @fmul_v16f32(ptr %a, ptr %b) #0 { ; VBITS_GE_256-NEXT: ld1w { z2.s }, p0/z, [x0] ; VBITS_GE_256-NEXT: ld1w { z3.s }, p0/z, [x1] ; VBITS_GE_256-NEXT: fmul z0.s, p0/m, z0.s, z1.s -; VBITS_GE_256-NEXT: movprfx z1, z2 -; VBITS_GE_256-NEXT: fmul z1.s, p0/m, z1.s, z3.s +; VBITS_GE_256-NEXT: fmul z2.s, p0/m, z2.s, z3.s ; VBITS_GE_256-NEXT: st1w { z0.s }, p0, [x0, x8, lsl #2] -; VBITS_GE_256-NEXT: st1w { z1.s }, p0, [x0] +; VBITS_GE_256-NEXT: st1w { z2.s }, p0, [x0] ; VBITS_GE_256-NEXT: ret ; ; VBITS_GE_512-LABEL: fmul_v16f32: @@ -1188,10 +1180,9 @@ define void @fmul_v8f64(ptr %a, ptr %b) #0 { ; VBITS_GE_256-NEXT: ld1d { z2.d }, p0/z, [x0] ; VBITS_GE_256-NEXT: ld1d { z3.d }, p0/z, [x1] ; VBITS_GE_256-NEXT: fmul z0.d, p0/m, z0.d, z1.d -; VBITS_GE_256-NEXT: movprfx z1, z2 -; 
VBITS_GE_256-NEXT: fmul z1.d, p0/m, z1.d, z3.d +; VBITS_GE_256-NEXT: fmul z2.d, p0/m, z2.d, z3.d ; VBITS_GE_256-NEXT: st1d { z0.d }, p0, [x0, x8, lsl #3] -; VBITS_GE_256-NEXT: st1d { z1.d }, p0, [x0] +; VBITS_GE_256-NEXT: st1d { z2.d }, p0, [x0] ; VBITS_GE_256-NEXT: ret ; ; VBITS_GE_512-LABEL: fmul_v8f64: @@ -1827,10 +1818,9 @@ define void @fsub_v32f16(ptr %a, ptr %b) #0 { ; VBITS_GE_256-NEXT: ld1h { z2.h }, p0/z, [x0] ; VBITS_GE_256-NEXT: ld1h { z3.h }, p0/z, [x1] ; VBITS_GE_256-NEXT: fsub z0.h, p0/m, z0.h, z1.h -; VBITS_GE_256-NEXT: movprfx z1, z2 -; VBITS_GE_256-NEXT: fsub z1.h, p0/m, z1.h, z3.h +; VBITS_GE_256-NEXT: fsub z2.h, p0/m, z2.h, z3.h ; VBITS_GE_256-NEXT: st1h { z0.h }, p0, [x0, x8, lsl #1] -; VBITS_GE_256-NEXT: st1h { z1.h }, p0, [x0] +; VBITS_GE_256-NEXT: st1h { z2.h }, p0, [x0] ; VBITS_GE_256-NEXT: ret ; ; VBITS_GE_512-LABEL: fsub_v32f16: @@ -1926,10 +1916,9 @@ define void @fsub_v16f32(ptr %a, ptr %b) #0 { ; VBITS_GE_256-NEXT: ld1w { z2.s }, p0/z, [x0] ; VBITS_GE_256-NEXT: ld1w { z3.s }, p0/z, [x1] ; VBITS_GE_256-NEXT: fsub z0.s, p0/m, z0.s, z1.s -; VBITS_GE_256-NEXT: movprfx z1, z2 -; VBITS_GE_256-NEXT: fsub z1.s, p0/m, z1.s, z3.s +; VBITS_GE_256-NEXT: fsub z2.s, p0/m, z2.s, z3.s ; VBITS_GE_256-NEXT: st1w { z0.s }, p0, [x0, x8, lsl #2] -; VBITS_GE_256-NEXT: st1w { z1.s }, p0, [x0] +; VBITS_GE_256-NEXT: st1w { z2.s }, p0, [x0] ; VBITS_GE_256-NEXT: ret ; ; VBITS_GE_512-LABEL: fsub_v16f32: @@ -2025,10 +2014,9 @@ define void @fsub_v8f64(ptr %a, ptr %b) #0 { ; VBITS_GE_256-NEXT: ld1d { z2.d }, p0/z, [x0] ; VBITS_GE_256-NEXT: ld1d { z3.d }, p0/z, [x1] ; VBITS_GE_256-NEXT: fsub z0.d, p0/m, z0.d, z1.d -; VBITS_GE_256-NEXT: movprfx z1, z2 -; VBITS_GE_256-NEXT: fsub z1.d, p0/m, z1.d, z3.d +; VBITS_GE_256-NEXT: fsub z2.d, p0/m, z2.d, z3.d ; VBITS_GE_256-NEXT: st1d { z0.d }, p0, [x0, x8, lsl #3] -; VBITS_GE_256-NEXT: st1d { z1.d }, p0, [x0] +; VBITS_GE_256-NEXT: st1d { z2.d }, p0, [x0] ; VBITS_GE_256-NEXT: ret ; ; VBITS_GE_512-LABEL: fsub_v8f64: diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-fp-fma.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-fp-fma.ll index e1ec5ee5f6137..633b429db3dfd 100644 --- a/llvm/test/CodeGen/AArch64/sve-fixed-length-fp-fma.ll +++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-fp-fma.ll @@ -64,10 +64,9 @@ define void @fma_v32f16(ptr %a, ptr %b, ptr %c) #0 { ; VBITS_GE_256-NEXT: ld1h { z4.h }, p0/z, [x1] ; VBITS_GE_256-NEXT: ld1h { z5.h }, p0/z, [x2] ; VBITS_GE_256-NEXT: fmad z0.h, p0/m, z1.h, z2.h -; VBITS_GE_256-NEXT: movprfx z1, z5 -; VBITS_GE_256-NEXT: fmla z1.h, p0/m, z3.h, z4.h +; VBITS_GE_256-NEXT: fmad z3.h, p0/m, z4.h, z5.h ; VBITS_GE_256-NEXT: st1h { z0.h }, p0, [x0, x8, lsl #1] -; VBITS_GE_256-NEXT: st1h { z1.h }, p0, [x0] +; VBITS_GE_256-NEXT: st1h { z3.h }, p0, [x0] ; VBITS_GE_256-NEXT: ret ; ; VBITS_GE_512-LABEL: fma_v32f16: @@ -181,10 +180,9 @@ define void @fma_v16f32(ptr %a, ptr %b, ptr %c) #0 { ; VBITS_GE_256-NEXT: ld1w { z4.s }, p0/z, [x1] ; VBITS_GE_256-NEXT: ld1w { z5.s }, p0/z, [x2] ; VBITS_GE_256-NEXT: fmad z0.s, p0/m, z1.s, z2.s -; VBITS_GE_256-NEXT: movprfx z1, z5 -; VBITS_GE_256-NEXT: fmla z1.s, p0/m, z3.s, z4.s +; VBITS_GE_256-NEXT: fmad z3.s, p0/m, z4.s, z5.s ; VBITS_GE_256-NEXT: st1w { z0.s }, p0, [x0, x8, lsl #2] -; VBITS_GE_256-NEXT: st1w { z1.s }, p0, [x0] +; VBITS_GE_256-NEXT: st1w { z3.s }, p0, [x0] ; VBITS_GE_256-NEXT: ret ; ; VBITS_GE_512-LABEL: fma_v16f32: @@ -297,10 +295,9 @@ define void @fma_v8f64(ptr %a, ptr %b, ptr %c) #0 { ; VBITS_GE_256-NEXT: ld1d { z4.d }, p0/z, [x1] ; VBITS_GE_256-NEXT: ld1d { z5.d }, p0/z, 
[x2] ; VBITS_GE_256-NEXT: fmad z0.d, p0/m, z1.d, z2.d -; VBITS_GE_256-NEXT: movprfx z1, z5 -; VBITS_GE_256-NEXT: fmla z1.d, p0/m, z3.d, z4.d +; VBITS_GE_256-NEXT: fmad z3.d, p0/m, z4.d, z5.d ; VBITS_GE_256-NEXT: st1d { z0.d }, p0, [x0, x8, lsl #3] -; VBITS_GE_256-NEXT: st1d { z1.d }, p0, [x0] +; VBITS_GE_256-NEXT: st1d { z3.d }, p0, [x0] ; VBITS_GE_256-NEXT: ret ; ; VBITS_GE_512-LABEL: fma_v8f64: diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-fp-minmax.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-fp-minmax.ll index de60deeafaf32..90a04995ff15e 100644 --- a/llvm/test/CodeGen/AArch64/sve-fixed-length-fp-minmax.ll +++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-fp-minmax.ll @@ -55,10 +55,9 @@ define void @fmaxnm_v32f16(ptr %a, ptr %b) #0 { ; VBITS_EQ_256-NEXT: ld1h { z2.h }, p0/z, [x0] ; VBITS_EQ_256-NEXT: ld1h { z3.h }, p0/z, [x1] ; VBITS_EQ_256-NEXT: fmaxnm z0.h, p0/m, z0.h, z1.h -; VBITS_EQ_256-NEXT: movprfx z1, z2 -; VBITS_EQ_256-NEXT: fmaxnm z1.h, p0/m, z1.h, z3.h +; VBITS_EQ_256-NEXT: fmaxnm z2.h, p0/m, z2.h, z3.h ; VBITS_EQ_256-NEXT: st1h { z0.h }, p0, [x0, x8, lsl #1] -; VBITS_EQ_256-NEXT: st1h { z1.h }, p0, [x0] +; VBITS_EQ_256-NEXT: st1h { z2.h }, p0, [x0] ; VBITS_EQ_256-NEXT: ret ; ; VBITS_GE_512-LABEL: fmaxnm_v32f16: @@ -154,10 +153,9 @@ define void @fmaxnm_v16f32(ptr %a, ptr %b) #0 { ; VBITS_EQ_256-NEXT: ld1w { z2.s }, p0/z, [x0] ; VBITS_EQ_256-NEXT: ld1w { z3.s }, p0/z, [x1] ; VBITS_EQ_256-NEXT: fmaxnm z0.s, p0/m, z0.s, z1.s -; VBITS_EQ_256-NEXT: movprfx z1, z2 -; VBITS_EQ_256-NEXT: fmaxnm z1.s, p0/m, z1.s, z3.s +; VBITS_EQ_256-NEXT: fmaxnm z2.s, p0/m, z2.s, z3.s ; VBITS_EQ_256-NEXT: st1w { z0.s }, p0, [x0, x8, lsl #2] -; VBITS_EQ_256-NEXT: st1w { z1.s }, p0, [x0] +; VBITS_EQ_256-NEXT: st1w { z2.s }, p0, [x0] ; VBITS_EQ_256-NEXT: ret ; ; VBITS_GE_512-LABEL: fmaxnm_v16f32: @@ -253,10 +251,9 @@ define void @fmaxnm_v8f64(ptr %a, ptr %b) #0 { ; VBITS_EQ_256-NEXT: ld1d { z2.d }, p0/z, [x0] ; VBITS_EQ_256-NEXT: ld1d { z3.d }, p0/z, [x1] ; VBITS_EQ_256-NEXT: fmaxnm z0.d, p0/m, z0.d, z1.d -; VBITS_EQ_256-NEXT: movprfx z1, z2 -; VBITS_EQ_256-NEXT: fmaxnm z1.d, p0/m, z1.d, z3.d +; VBITS_EQ_256-NEXT: fmaxnm z2.d, p0/m, z2.d, z3.d ; VBITS_EQ_256-NEXT: st1d { z0.d }, p0, [x0, x8, lsl #3] -; VBITS_EQ_256-NEXT: st1d { z1.d }, p0, [x0] +; VBITS_EQ_256-NEXT: st1d { z2.d }, p0, [x0] ; VBITS_EQ_256-NEXT: ret ; ; VBITS_GE_512-LABEL: fmaxnm_v8f64: @@ -356,10 +353,9 @@ define void @fminnm_v32f16(ptr %a, ptr %b) #0 { ; VBITS_EQ_256-NEXT: ld1h { z2.h }, p0/z, [x0] ; VBITS_EQ_256-NEXT: ld1h { z3.h }, p0/z, [x1] ; VBITS_EQ_256-NEXT: fminnm z0.h, p0/m, z0.h, z1.h -; VBITS_EQ_256-NEXT: movprfx z1, z2 -; VBITS_EQ_256-NEXT: fminnm z1.h, p0/m, z1.h, z3.h +; VBITS_EQ_256-NEXT: fminnm z2.h, p0/m, z2.h, z3.h ; VBITS_EQ_256-NEXT: st1h { z0.h }, p0, [x0, x8, lsl #1] -; VBITS_EQ_256-NEXT: st1h { z1.h }, p0, [x0] +; VBITS_EQ_256-NEXT: st1h { z2.h }, p0, [x0] ; VBITS_EQ_256-NEXT: ret ; ; VBITS_GE_512-LABEL: fminnm_v32f16: @@ -455,10 +451,9 @@ define void @fminnm_v16f32(ptr %a, ptr %b) #0 { ; VBITS_EQ_256-NEXT: ld1w { z2.s }, p0/z, [x0] ; VBITS_EQ_256-NEXT: ld1w { z3.s }, p0/z, [x1] ; VBITS_EQ_256-NEXT: fminnm z0.s, p0/m, z0.s, z1.s -; VBITS_EQ_256-NEXT: movprfx z1, z2 -; VBITS_EQ_256-NEXT: fminnm z1.s, p0/m, z1.s, z3.s +; VBITS_EQ_256-NEXT: fminnm z2.s, p0/m, z2.s, z3.s ; VBITS_EQ_256-NEXT: st1w { z0.s }, p0, [x0, x8, lsl #2] -; VBITS_EQ_256-NEXT: st1w { z1.s }, p0, [x0] +; VBITS_EQ_256-NEXT: st1w { z2.s }, p0, [x0] ; VBITS_EQ_256-NEXT: ret ; ; VBITS_GE_512-LABEL: fminnm_v16f32: @@ -554,10 +549,9 @@ 
define void @fminnm_v8f64(ptr %a, ptr %b) #0 { ; VBITS_EQ_256-NEXT: ld1d { z2.d }, p0/z, [x0] ; VBITS_EQ_256-NEXT: ld1d { z3.d }, p0/z, [x1] ; VBITS_EQ_256-NEXT: fminnm z0.d, p0/m, z0.d, z1.d -; VBITS_EQ_256-NEXT: movprfx z1, z2 -; VBITS_EQ_256-NEXT: fminnm z1.d, p0/m, z1.d, z3.d +; VBITS_EQ_256-NEXT: fminnm z2.d, p0/m, z2.d, z3.d ; VBITS_EQ_256-NEXT: st1d { z0.d }, p0, [x0, x8, lsl #3] -; VBITS_EQ_256-NEXT: st1d { z1.d }, p0, [x0] +; VBITS_EQ_256-NEXT: st1d { z2.d }, p0, [x0] ; VBITS_EQ_256-NEXT: ret ; ; VBITS_GE_512-LABEL: fminnm_v8f64: @@ -657,10 +651,9 @@ define void @fmax_v32f16(ptr %a, ptr %b) #0 { ; VBITS_EQ_256-NEXT: ld1h { z2.h }, p0/z, [x0] ; VBITS_EQ_256-NEXT: ld1h { z3.h }, p0/z, [x1] ; VBITS_EQ_256-NEXT: fmax z0.h, p0/m, z0.h, z1.h -; VBITS_EQ_256-NEXT: movprfx z1, z2 -; VBITS_EQ_256-NEXT: fmax z1.h, p0/m, z1.h, z3.h +; VBITS_EQ_256-NEXT: fmax z2.h, p0/m, z2.h, z3.h ; VBITS_EQ_256-NEXT: st1h { z0.h }, p0, [x0, x8, lsl #1] -; VBITS_EQ_256-NEXT: st1h { z1.h }, p0, [x0] +; VBITS_EQ_256-NEXT: st1h { z2.h }, p0, [x0] ; VBITS_EQ_256-NEXT: ret ; ; VBITS_GE_512-LABEL: fmax_v32f16: @@ -756,10 +749,9 @@ define void @fmax_v16f32(ptr %a, ptr %b) #0 { ; VBITS_EQ_256-NEXT: ld1w { z2.s }, p0/z, [x0] ; VBITS_EQ_256-NEXT: ld1w { z3.s }, p0/z, [x1] ; VBITS_EQ_256-NEXT: fmax z0.s, p0/m, z0.s, z1.s -; VBITS_EQ_256-NEXT: movprfx z1, z2 -; VBITS_EQ_256-NEXT: fmax z1.s, p0/m, z1.s, z3.s +; VBITS_EQ_256-NEXT: fmax z2.s, p0/m, z2.s, z3.s ; VBITS_EQ_256-NEXT: st1w { z0.s }, p0, [x0, x8, lsl #2] -; VBITS_EQ_256-NEXT: st1w { z1.s }, p0, [x0] +; VBITS_EQ_256-NEXT: st1w { z2.s }, p0, [x0] ; VBITS_EQ_256-NEXT: ret ; ; VBITS_GE_512-LABEL: fmax_v16f32: @@ -855,10 +847,9 @@ define void @fmax_v8f64(ptr %a, ptr %b) #0 { ; VBITS_EQ_256-NEXT: ld1d { z2.d }, p0/z, [x0] ; VBITS_EQ_256-NEXT: ld1d { z3.d }, p0/z, [x1] ; VBITS_EQ_256-NEXT: fmax z0.d, p0/m, z0.d, z1.d -; VBITS_EQ_256-NEXT: movprfx z1, z2 -; VBITS_EQ_256-NEXT: fmax z1.d, p0/m, z1.d, z3.d +; VBITS_EQ_256-NEXT: fmax z2.d, p0/m, z2.d, z3.d ; VBITS_EQ_256-NEXT: st1d { z0.d }, p0, [x0, x8, lsl #3] -; VBITS_EQ_256-NEXT: st1d { z1.d }, p0, [x0] +; VBITS_EQ_256-NEXT: st1d { z2.d }, p0, [x0] ; VBITS_EQ_256-NEXT: ret ; ; VBITS_GE_512-LABEL: fmax_v8f64: @@ -958,10 +949,9 @@ define void @fmin_v32f16(ptr %a, ptr %b) #0 { ; VBITS_EQ_256-NEXT: ld1h { z2.h }, p0/z, [x0] ; VBITS_EQ_256-NEXT: ld1h { z3.h }, p0/z, [x1] ; VBITS_EQ_256-NEXT: fmin z0.h, p0/m, z0.h, z1.h -; VBITS_EQ_256-NEXT: movprfx z1, z2 -; VBITS_EQ_256-NEXT: fmin z1.h, p0/m, z1.h, z3.h +; VBITS_EQ_256-NEXT: fmin z2.h, p0/m, z2.h, z3.h ; VBITS_EQ_256-NEXT: st1h { z0.h }, p0, [x0, x8, lsl #1] -; VBITS_EQ_256-NEXT: st1h { z1.h }, p0, [x0] +; VBITS_EQ_256-NEXT: st1h { z2.h }, p0, [x0] ; VBITS_EQ_256-NEXT: ret ; ; VBITS_GE_512-LABEL: fmin_v32f16: @@ -1057,10 +1047,9 @@ define void @fmin_v16f32(ptr %a, ptr %b) #0 { ; VBITS_EQ_256-NEXT: ld1w { z2.s }, p0/z, [x0] ; VBITS_EQ_256-NEXT: ld1w { z3.s }, p0/z, [x1] ; VBITS_EQ_256-NEXT: fmin z0.s, p0/m, z0.s, z1.s -; VBITS_EQ_256-NEXT: movprfx z1, z2 -; VBITS_EQ_256-NEXT: fmin z1.s, p0/m, z1.s, z3.s +; VBITS_EQ_256-NEXT: fmin z2.s, p0/m, z2.s, z3.s ; VBITS_EQ_256-NEXT: st1w { z0.s }, p0, [x0, x8, lsl #2] -; VBITS_EQ_256-NEXT: st1w { z1.s }, p0, [x0] +; VBITS_EQ_256-NEXT: st1w { z2.s }, p0, [x0] ; VBITS_EQ_256-NEXT: ret ; ; VBITS_GE_512-LABEL: fmin_v16f32: @@ -1156,10 +1145,9 @@ define void @fmin_v8f64(ptr %a, ptr %b) #0 { ; VBITS_EQ_256-NEXT: ld1d { z2.d }, p0/z, [x0] ; VBITS_EQ_256-NEXT: ld1d { z3.d }, p0/z, [x1] ; VBITS_EQ_256-NEXT: fmin z0.d, p0/m, z0.d, z1.d -; 
VBITS_EQ_256-NEXT: movprfx z1, z2 -; VBITS_EQ_256-NEXT: fmin z1.d, p0/m, z1.d, z3.d +; VBITS_EQ_256-NEXT: fmin z2.d, p0/m, z2.d, z3.d ; VBITS_EQ_256-NEXT: st1d { z0.d }, p0, [x0, x8, lsl #3] -; VBITS_EQ_256-NEXT: st1d { z1.d }, p0, [x0] +; VBITS_EQ_256-NEXT: st1d { z2.d }, p0, [x0] ; VBITS_EQ_256-NEXT: ret ; ; VBITS_GE_512-LABEL: fmin_v8f64: diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-int-abd.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-int-abd.ll index 08a974fa2d9f4..a91b392b7230a 100644 --- a/llvm/test/CodeGen/AArch64/sve-fixed-length-int-abd.ll +++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-int-abd.ll @@ -155,10 +155,9 @@ define void @sabd_v64i8_v64i64(ptr %a, ptr %b) #0 { ; VBITS_GE_256-NEXT: ld1b { z2.b }, p0/z, [x0] ; VBITS_GE_256-NEXT: ld1b { z3.b }, p0/z, [x1] ; VBITS_GE_256-NEXT: sabd z0.b, p0/m, z0.b, z1.b -; VBITS_GE_256-NEXT: movprfx z1, z2 -; VBITS_GE_256-NEXT: sabd z1.b, p0/m, z1.b, z3.b +; VBITS_GE_256-NEXT: sabd z2.b, p0/m, z2.b, z3.b ; VBITS_GE_256-NEXT: st1b { z0.b }, p0, [x0, x8] -; VBITS_GE_256-NEXT: st1b { z1.b }, p0, [x0] +; VBITS_GE_256-NEXT: st1b { z2.b }, p0, [x0] ; VBITS_GE_256-NEXT: ret ; ; VBITS_GE_512-LABEL: sabd_v64i8_v64i64: diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-int-arith.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-int-arith.ll index 58fca3a2cf8b6..736239599836c 100644 --- a/llvm/test/CodeGen/AArch64/sve-fixed-length-int-arith.ll +++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-int-arith.ll @@ -456,10 +456,9 @@ define void @mul_v64i8(ptr %a, ptr %b) #0 { ; VBITS_GE_256-NEXT: ld1b { z2.b }, p0/z, [x0] ; VBITS_GE_256-NEXT: ld1b { z3.b }, p0/z, [x1] ; VBITS_GE_256-NEXT: mul z0.b, p0/m, z0.b, z1.b -; VBITS_GE_256-NEXT: movprfx z1, z2 -; VBITS_GE_256-NEXT: mul z1.b, p0/m, z1.b, z3.b +; VBITS_GE_256-NEXT: mul z2.b, p0/m, z2.b, z3.b ; VBITS_GE_256-NEXT: st1b { z0.b }, p0, [x0, x8] -; VBITS_GE_256-NEXT: st1b { z1.b }, p0, [x0] +; VBITS_GE_256-NEXT: st1b { z2.b }, p0, [x0] ; VBITS_GE_256-NEXT: ret ; ; VBITS_GE_512-LABEL: mul_v64i8: @@ -555,10 +554,9 @@ define void @mul_v32i16(ptr %a, ptr %b) #0 { ; VBITS_GE_256-NEXT: ld1h { z2.h }, p0/z, [x0] ; VBITS_GE_256-NEXT: ld1h { z3.h }, p0/z, [x1] ; VBITS_GE_256-NEXT: mul z0.h, p0/m, z0.h, z1.h -; VBITS_GE_256-NEXT: movprfx z1, z2 -; VBITS_GE_256-NEXT: mul z1.h, p0/m, z1.h, z3.h +; VBITS_GE_256-NEXT: mul z2.h, p0/m, z2.h, z3.h ; VBITS_GE_256-NEXT: st1h { z0.h }, p0, [x0, x8, lsl #1] -; VBITS_GE_256-NEXT: st1h { z1.h }, p0, [x0] +; VBITS_GE_256-NEXT: st1h { z2.h }, p0, [x0] ; VBITS_GE_256-NEXT: ret ; ; VBITS_GE_512-LABEL: mul_v32i16: @@ -654,10 +652,9 @@ define void @mul_v16i32(ptr %a, ptr %b) #0 { ; VBITS_GE_256-NEXT: ld1w { z2.s }, p0/z, [x0] ; VBITS_GE_256-NEXT: ld1w { z3.s }, p0/z, [x1] ; VBITS_GE_256-NEXT: mul z0.s, p0/m, z0.s, z1.s -; VBITS_GE_256-NEXT: movprfx z1, z2 -; VBITS_GE_256-NEXT: mul z1.s, p0/m, z1.s, z3.s +; VBITS_GE_256-NEXT: mul z2.s, p0/m, z2.s, z3.s ; VBITS_GE_256-NEXT: st1w { z0.s }, p0, [x0, x8, lsl #2] -; VBITS_GE_256-NEXT: st1w { z1.s }, p0, [x0] +; VBITS_GE_256-NEXT: st1w { z2.s }, p0, [x0] ; VBITS_GE_256-NEXT: ret ; ; VBITS_GE_512-LABEL: mul_v16i32: @@ -759,10 +756,9 @@ define void @mul_v8i64(ptr %a, ptr %b) #0 { ; VBITS_GE_256-NEXT: ld1d { z2.d }, p0/z, [x0] ; VBITS_GE_256-NEXT: ld1d { z3.d }, p0/z, [x1] ; VBITS_GE_256-NEXT: mul z0.d, p0/m, z0.d, z1.d -; VBITS_GE_256-NEXT: movprfx z1, z2 -; VBITS_GE_256-NEXT: mul z1.d, p0/m, z1.d, z3.d +; VBITS_GE_256-NEXT: mul z2.d, p0/m, z2.d, z3.d ; VBITS_GE_256-NEXT: st1d { z0.d }, p0, [x0, x8, lsl #3] -; VBITS_GE_256-NEXT: 
st1d { z1.d }, p0, [x0] +; VBITS_GE_256-NEXT: st1d { z2.d }, p0, [x0] ; VBITS_GE_256-NEXT: ret ; ; VBITS_GE_512-LABEL: mul_v8i64: diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-int-minmax.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-int-minmax.ll index 4926684ddc2de..c56376887d966 100644 --- a/llvm/test/CodeGen/AArch64/sve-fixed-length-int-minmax.ll +++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-int-minmax.ll @@ -55,10 +55,9 @@ define void @smax_v64i8(ptr %a, ptr %b) #0 { ; VBITS_GE_256-NEXT: ld1b { z2.b }, p0/z, [x0] ; VBITS_GE_256-NEXT: ld1b { z3.b }, p0/z, [x1] ; VBITS_GE_256-NEXT: smax z0.b, p0/m, z0.b, z1.b -; VBITS_GE_256-NEXT: movprfx z1, z2 -; VBITS_GE_256-NEXT: smax z1.b, p0/m, z1.b, z3.b +; VBITS_GE_256-NEXT: smax z2.b, p0/m, z2.b, z3.b ; VBITS_GE_256-NEXT: st1b { z0.b }, p0, [x0, x8] -; VBITS_GE_256-NEXT: st1b { z1.b }, p0, [x0] +; VBITS_GE_256-NEXT: st1b { z2.b }, p0, [x0] ; VBITS_GE_256-NEXT: ret ; ; VBITS_GE_512-LABEL: smax_v64i8: @@ -154,10 +153,9 @@ define void @smax_v32i16(ptr %a, ptr %b) #0 { ; VBITS_GE_256-NEXT: ld1h { z2.h }, p0/z, [x0] ; VBITS_GE_256-NEXT: ld1h { z3.h }, p0/z, [x1] ; VBITS_GE_256-NEXT: smax z0.h, p0/m, z0.h, z1.h -; VBITS_GE_256-NEXT: movprfx z1, z2 -; VBITS_GE_256-NEXT: smax z1.h, p0/m, z1.h, z3.h +; VBITS_GE_256-NEXT: smax z2.h, p0/m, z2.h, z3.h ; VBITS_GE_256-NEXT: st1h { z0.h }, p0, [x0, x8, lsl #1] -; VBITS_GE_256-NEXT: st1h { z1.h }, p0, [x0] +; VBITS_GE_256-NEXT: st1h { z2.h }, p0, [x0] ; VBITS_GE_256-NEXT: ret ; ; VBITS_GE_512-LABEL: smax_v32i16: @@ -253,10 +251,9 @@ define void @smax_v16i32(ptr %a, ptr %b) #0 { ; VBITS_GE_256-NEXT: ld1w { z2.s }, p0/z, [x0] ; VBITS_GE_256-NEXT: ld1w { z3.s }, p0/z, [x1] ; VBITS_GE_256-NEXT: smax z0.s, p0/m, z0.s, z1.s -; VBITS_GE_256-NEXT: movprfx z1, z2 -; VBITS_GE_256-NEXT: smax z1.s, p0/m, z1.s, z3.s +; VBITS_GE_256-NEXT: smax z2.s, p0/m, z2.s, z3.s ; VBITS_GE_256-NEXT: st1w { z0.s }, p0, [x0, x8, lsl #2] -; VBITS_GE_256-NEXT: st1w { z1.s }, p0, [x0] +; VBITS_GE_256-NEXT: st1w { z2.s }, p0, [x0] ; VBITS_GE_256-NEXT: ret ; ; VBITS_GE_512-LABEL: smax_v16i32: @@ -360,10 +357,9 @@ define void @smax_v8i64(ptr %a, ptr %b) #0 { ; VBITS_GE_256-NEXT: ld1d { z2.d }, p0/z, [x0] ; VBITS_GE_256-NEXT: ld1d { z3.d }, p0/z, [x1] ; VBITS_GE_256-NEXT: smax z0.d, p0/m, z0.d, z1.d -; VBITS_GE_256-NEXT: movprfx z1, z2 -; VBITS_GE_256-NEXT: smax z1.d, p0/m, z1.d, z3.d +; VBITS_GE_256-NEXT: smax z2.d, p0/m, z2.d, z3.d ; VBITS_GE_256-NEXT: st1d { z0.d }, p0, [x0, x8, lsl #3] -; VBITS_GE_256-NEXT: st1d { z1.d }, p0, [x0] +; VBITS_GE_256-NEXT: st1d { z2.d }, p0, [x0] ; VBITS_GE_256-NEXT: ret ; ; VBITS_GE_512-LABEL: smax_v8i64: @@ -463,10 +459,9 @@ define void @smin_v64i8(ptr %a, ptr %b) #0 { ; VBITS_GE_256-NEXT: ld1b { z2.b }, p0/z, [x0] ; VBITS_GE_256-NEXT: ld1b { z3.b }, p0/z, [x1] ; VBITS_GE_256-NEXT: smin z0.b, p0/m, z0.b, z1.b -; VBITS_GE_256-NEXT: movprfx z1, z2 -; VBITS_GE_256-NEXT: smin z1.b, p0/m, z1.b, z3.b +; VBITS_GE_256-NEXT: smin z2.b, p0/m, z2.b, z3.b ; VBITS_GE_256-NEXT: st1b { z0.b }, p0, [x0, x8] -; VBITS_GE_256-NEXT: st1b { z1.b }, p0, [x0] +; VBITS_GE_256-NEXT: st1b { z2.b }, p0, [x0] ; VBITS_GE_256-NEXT: ret ; ; VBITS_GE_512-LABEL: smin_v64i8: @@ -562,10 +557,9 @@ define void @smin_v32i16(ptr %a, ptr %b) #0 { ; VBITS_GE_256-NEXT: ld1h { z2.h }, p0/z, [x0] ; VBITS_GE_256-NEXT: ld1h { z3.h }, p0/z, [x1] ; VBITS_GE_256-NEXT: smin z0.h, p0/m, z0.h, z1.h -; VBITS_GE_256-NEXT: movprfx z1, z2 -; VBITS_GE_256-NEXT: smin z1.h, p0/m, z1.h, z3.h +; VBITS_GE_256-NEXT: smin z2.h, p0/m, z2.h, z3.h ; 
VBITS_GE_256-NEXT: st1h { z0.h }, p0, [x0, x8, lsl #1] -; VBITS_GE_256-NEXT: st1h { z1.h }, p0, [x0] +; VBITS_GE_256-NEXT: st1h { z2.h }, p0, [x0] ; VBITS_GE_256-NEXT: ret ; ; VBITS_GE_512-LABEL: smin_v32i16: @@ -661,10 +655,9 @@ define void @smin_v16i32(ptr %a, ptr %b) #0 { ; VBITS_GE_256-NEXT: ld1w { z2.s }, p0/z, [x0] ; VBITS_GE_256-NEXT: ld1w { z3.s }, p0/z, [x1] ; VBITS_GE_256-NEXT: smin z0.s, p0/m, z0.s, z1.s -; VBITS_GE_256-NEXT: movprfx z1, z2 -; VBITS_GE_256-NEXT: smin z1.s, p0/m, z1.s, z3.s +; VBITS_GE_256-NEXT: smin z2.s, p0/m, z2.s, z3.s ; VBITS_GE_256-NEXT: st1w { z0.s }, p0, [x0, x8, lsl #2] -; VBITS_GE_256-NEXT: st1w { z1.s }, p0, [x0] +; VBITS_GE_256-NEXT: st1w { z2.s }, p0, [x0] ; VBITS_GE_256-NEXT: ret ; ; VBITS_GE_512-LABEL: smin_v16i32: @@ -768,10 +761,9 @@ define void @smin_v8i64(ptr %a, ptr %b) #0 { ; VBITS_GE_256-NEXT: ld1d { z2.d }, p0/z, [x0] ; VBITS_GE_256-NEXT: ld1d { z3.d }, p0/z, [x1] ; VBITS_GE_256-NEXT: smin z0.d, p0/m, z0.d, z1.d -; VBITS_GE_256-NEXT: movprfx z1, z2 -; VBITS_GE_256-NEXT: smin z1.d, p0/m, z1.d, z3.d +; VBITS_GE_256-NEXT: smin z2.d, p0/m, z2.d, z3.d ; VBITS_GE_256-NEXT: st1d { z0.d }, p0, [x0, x8, lsl #3] -; VBITS_GE_256-NEXT: st1d { z1.d }, p0, [x0] +; VBITS_GE_256-NEXT: st1d { z2.d }, p0, [x0] ; VBITS_GE_256-NEXT: ret ; ; VBITS_GE_512-LABEL: smin_v8i64: @@ -871,10 +863,9 @@ define void @umax_v64i8(ptr %a, ptr %b) #0 { ; VBITS_GE_256-NEXT: ld1b { z2.b }, p0/z, [x0] ; VBITS_GE_256-NEXT: ld1b { z3.b }, p0/z, [x1] ; VBITS_GE_256-NEXT: umax z0.b, p0/m, z0.b, z1.b -; VBITS_GE_256-NEXT: movprfx z1, z2 -; VBITS_GE_256-NEXT: umax z1.b, p0/m, z1.b, z3.b +; VBITS_GE_256-NEXT: umax z2.b, p0/m, z2.b, z3.b ; VBITS_GE_256-NEXT: st1b { z0.b }, p0, [x0, x8] -; VBITS_GE_256-NEXT: st1b { z1.b }, p0, [x0] +; VBITS_GE_256-NEXT: st1b { z2.b }, p0, [x0] ; VBITS_GE_256-NEXT: ret ; ; VBITS_GE_512-LABEL: umax_v64i8: @@ -970,10 +961,9 @@ define void @umax_v32i16(ptr %a, ptr %b) #0 { ; VBITS_GE_256-NEXT: ld1h { z2.h }, p0/z, [x0] ; VBITS_GE_256-NEXT: ld1h { z3.h }, p0/z, [x1] ; VBITS_GE_256-NEXT: umax z0.h, p0/m, z0.h, z1.h -; VBITS_GE_256-NEXT: movprfx z1, z2 -; VBITS_GE_256-NEXT: umax z1.h, p0/m, z1.h, z3.h +; VBITS_GE_256-NEXT: umax z2.h, p0/m, z2.h, z3.h ; VBITS_GE_256-NEXT: st1h { z0.h }, p0, [x0, x8, lsl #1] -; VBITS_GE_256-NEXT: st1h { z1.h }, p0, [x0] +; VBITS_GE_256-NEXT: st1h { z2.h }, p0, [x0] ; VBITS_GE_256-NEXT: ret ; ; VBITS_GE_512-LABEL: umax_v32i16: @@ -1069,10 +1059,9 @@ define void @umax_v16i32(ptr %a, ptr %b) #0 { ; VBITS_GE_256-NEXT: ld1w { z2.s }, p0/z, [x0] ; VBITS_GE_256-NEXT: ld1w { z3.s }, p0/z, [x1] ; VBITS_GE_256-NEXT: umax z0.s, p0/m, z0.s, z1.s -; VBITS_GE_256-NEXT: movprfx z1, z2 -; VBITS_GE_256-NEXT: umax z1.s, p0/m, z1.s, z3.s +; VBITS_GE_256-NEXT: umax z2.s, p0/m, z2.s, z3.s ; VBITS_GE_256-NEXT: st1w { z0.s }, p0, [x0, x8, lsl #2] -; VBITS_GE_256-NEXT: st1w { z1.s }, p0, [x0] +; VBITS_GE_256-NEXT: st1w { z2.s }, p0, [x0] ; VBITS_GE_256-NEXT: ret ; ; VBITS_GE_512-LABEL: umax_v16i32: @@ -1176,10 +1165,9 @@ define void @umax_v8i64(ptr %a, ptr %b) #0 { ; VBITS_GE_256-NEXT: ld1d { z2.d }, p0/z, [x0] ; VBITS_GE_256-NEXT: ld1d { z3.d }, p0/z, [x1] ; VBITS_GE_256-NEXT: umax z0.d, p0/m, z0.d, z1.d -; VBITS_GE_256-NEXT: movprfx z1, z2 -; VBITS_GE_256-NEXT: umax z1.d, p0/m, z1.d, z3.d +; VBITS_GE_256-NEXT: umax z2.d, p0/m, z2.d, z3.d ; VBITS_GE_256-NEXT: st1d { z0.d }, p0, [x0, x8, lsl #3] -; VBITS_GE_256-NEXT: st1d { z1.d }, p0, [x0] +; VBITS_GE_256-NEXT: st1d { z2.d }, p0, [x0] ; VBITS_GE_256-NEXT: ret ; ; VBITS_GE_512-LABEL: umax_v8i64: @@ 
-1279,10 +1267,9 @@ define void @umin_v64i8(ptr %a, ptr %b) #0 { ; VBITS_GE_256-NEXT: ld1b { z2.b }, p0/z, [x0] ; VBITS_GE_256-NEXT: ld1b { z3.b }, p0/z, [x1] ; VBITS_GE_256-NEXT: umin z0.b, p0/m, z0.b, z1.b -; VBITS_GE_256-NEXT: movprfx z1, z2 -; VBITS_GE_256-NEXT: umin z1.b, p0/m, z1.b, z3.b +; VBITS_GE_256-NEXT: umin z2.b, p0/m, z2.b, z3.b ; VBITS_GE_256-NEXT: st1b { z0.b }, p0, [x0, x8] -; VBITS_GE_256-NEXT: st1b { z1.b }, p0, [x0] +; VBITS_GE_256-NEXT: st1b { z2.b }, p0, [x0] ; VBITS_GE_256-NEXT: ret ; ; VBITS_GE_512-LABEL: umin_v64i8: @@ -1378,10 +1365,9 @@ define void @umin_v32i16(ptr %a, ptr %b) #0 { ; VBITS_GE_256-NEXT: ld1h { z2.h }, p0/z, [x0] ; VBITS_GE_256-NEXT: ld1h { z3.h }, p0/z, [x1] ; VBITS_GE_256-NEXT: umin z0.h, p0/m, z0.h, z1.h -; VBITS_GE_256-NEXT: movprfx z1, z2 -; VBITS_GE_256-NEXT: umin z1.h, p0/m, z1.h, z3.h +; VBITS_GE_256-NEXT: umin z2.h, p0/m, z2.h, z3.h ; VBITS_GE_256-NEXT: st1h { z0.h }, p0, [x0, x8, lsl #1] -; VBITS_GE_256-NEXT: st1h { z1.h }, p0, [x0] +; VBITS_GE_256-NEXT: st1h { z2.h }, p0, [x0] ; VBITS_GE_256-NEXT: ret ; ; VBITS_GE_512-LABEL: umin_v32i16: @@ -1477,10 +1463,9 @@ define void @umin_v16i32(ptr %a, ptr %b) #0 { ; VBITS_GE_256-NEXT: ld1w { z2.s }, p0/z, [x0] ; VBITS_GE_256-NEXT: ld1w { z3.s }, p0/z, [x1] ; VBITS_GE_256-NEXT: umin z0.s, p0/m, z0.s, z1.s -; VBITS_GE_256-NEXT: movprfx z1, z2 -; VBITS_GE_256-NEXT: umin z1.s, p0/m, z1.s, z3.s +; VBITS_GE_256-NEXT: umin z2.s, p0/m, z2.s, z3.s ; VBITS_GE_256-NEXT: st1w { z0.s }, p0, [x0, x8, lsl #2] -; VBITS_GE_256-NEXT: st1w { z1.s }, p0, [x0] +; VBITS_GE_256-NEXT: st1w { z2.s }, p0, [x0] ; VBITS_GE_256-NEXT: ret ; ; VBITS_GE_512-LABEL: umin_v16i32: @@ -1584,10 +1569,9 @@ define void @umin_v8i64(ptr %a, ptr %b) #0 { ; VBITS_GE_256-NEXT: ld1d { z2.d }, p0/z, [x0] ; VBITS_GE_256-NEXT: ld1d { z3.d }, p0/z, [x1] ; VBITS_GE_256-NEXT: umin z0.d, p0/m, z0.d, z1.d -; VBITS_GE_256-NEXT: movprfx z1, z2 -; VBITS_GE_256-NEXT: umin z1.d, p0/m, z1.d, z3.d +; VBITS_GE_256-NEXT: umin z2.d, p0/m, z2.d, z3.d ; VBITS_GE_256-NEXT: st1d { z0.d }, p0, [x0, x8, lsl #3] -; VBITS_GE_256-NEXT: st1d { z1.d }, p0, [x0] +; VBITS_GE_256-NEXT: st1d { z2.d }, p0, [x0] ; VBITS_GE_256-NEXT: ret ; ; VBITS_GE_512-LABEL: umin_v8i64: diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-int-mulh.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-int-mulh.ll index 41cce354cc9de..dfbc23707e418 100644 --- a/llvm/test/CodeGen/AArch64/sve-fixed-length-int-mulh.ll +++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-int-mulh.ll @@ -78,10 +78,9 @@ define void @smulh_v64i8(ptr %a, ptr %b) #0 { ; VBITS_GE_256-NEXT: ld1b { z2.b }, p0/z, [x0] ; VBITS_GE_256-NEXT: ld1b { z3.b }, p0/z, [x1] ; VBITS_GE_256-NEXT: smulh z0.b, p0/m, z0.b, z1.b -; VBITS_GE_256-NEXT: movprfx z1, z2 -; VBITS_GE_256-NEXT: smulh z1.b, p0/m, z1.b, z3.b +; VBITS_GE_256-NEXT: smulh z2.b, p0/m, z2.b, z3.b ; VBITS_GE_256-NEXT: st1b { z0.b }, p0, [x0, x8] -; VBITS_GE_256-NEXT: st1b { z1.b }, p0, [x0] +; VBITS_GE_256-NEXT: st1b { z2.b }, p0, [x0] ; VBITS_GE_256-NEXT: ret ; ; VBITS_GE_512-LABEL: smulh_v64i8: @@ -209,10 +208,9 @@ define void @smulh_v32i16(ptr %a, ptr %b) #0 { ; VBITS_GE_256-NEXT: ld1h { z2.h }, p0/z, [x0] ; VBITS_GE_256-NEXT: ld1h { z3.h }, p0/z, [x1] ; VBITS_GE_256-NEXT: smulh z0.h, p0/m, z0.h, z1.h -; VBITS_GE_256-NEXT: movprfx z1, z2 -; VBITS_GE_256-NEXT: smulh z1.h, p0/m, z1.h, z3.h +; VBITS_GE_256-NEXT: smulh z2.h, p0/m, z2.h, z3.h ; VBITS_GE_256-NEXT: st1h { z0.h }, p0, [x0, x8, lsl #1] -; VBITS_GE_256-NEXT: st1h { z1.h }, p0, [x0] +; VBITS_GE_256-NEXT: st1h { z2.h 
}, p0, [x0] ; VBITS_GE_256-NEXT: ret ; ; VBITS_GE_512-LABEL: smulh_v32i16: @@ -340,10 +338,9 @@ define void @smulh_v16i32(ptr %a, ptr %b) #0 { ; VBITS_GE_256-NEXT: ld1w { z2.s }, p0/z, [x0] ; VBITS_GE_256-NEXT: ld1w { z3.s }, p0/z, [x1] ; VBITS_GE_256-NEXT: smulh z0.s, p0/m, z0.s, z1.s -; VBITS_GE_256-NEXT: movprfx z1, z2 -; VBITS_GE_256-NEXT: smulh z1.s, p0/m, z1.s, z3.s +; VBITS_GE_256-NEXT: smulh z2.s, p0/m, z2.s, z3.s ; VBITS_GE_256-NEXT: st1w { z0.s }, p0, [x0, x8, lsl #2] -; VBITS_GE_256-NEXT: st1w { z1.s }, p0, [x0] +; VBITS_GE_256-NEXT: st1w { z2.s }, p0, [x0] ; VBITS_GE_256-NEXT: ret ; ; VBITS_GE_512-LABEL: smulh_v16i32: @@ -471,10 +468,9 @@ define void @smulh_v8i64(ptr %a, ptr %b) #0 { ; VBITS_GE_256-NEXT: ld1d { z2.d }, p0/z, [x0] ; VBITS_GE_256-NEXT: ld1d { z3.d }, p0/z, [x1] ; VBITS_GE_256-NEXT: smulh z0.d, p0/m, z0.d, z1.d -; VBITS_GE_256-NEXT: movprfx z1, z2 -; VBITS_GE_256-NEXT: smulh z1.d, p0/m, z1.d, z3.d +; VBITS_GE_256-NEXT: smulh z2.d, p0/m, z2.d, z3.d ; VBITS_GE_256-NEXT: st1d { z0.d }, p0, [x0, x8, lsl #3] -; VBITS_GE_256-NEXT: st1d { z1.d }, p0, [x0] +; VBITS_GE_256-NEXT: st1d { z2.d }, p0, [x0] ; VBITS_GE_256-NEXT: ret ; ; VBITS_GE_512-LABEL: smulh_v8i64: @@ -607,10 +603,9 @@ define void @umulh_v64i8(ptr %a, ptr %b) #0 { ; VBITS_GE_256-NEXT: ld1b { z2.b }, p0/z, [x0] ; VBITS_GE_256-NEXT: ld1b { z3.b }, p0/z, [x1] ; VBITS_GE_256-NEXT: umulh z0.b, p0/m, z0.b, z1.b -; VBITS_GE_256-NEXT: movprfx z1, z2 -; VBITS_GE_256-NEXT: umulh z1.b, p0/m, z1.b, z3.b +; VBITS_GE_256-NEXT: umulh z2.b, p0/m, z2.b, z3.b ; VBITS_GE_256-NEXT: st1b { z0.b }, p0, [x0, x8] -; VBITS_GE_256-NEXT: st1b { z1.b }, p0, [x0] +; VBITS_GE_256-NEXT: st1b { z2.b }, p0, [x0] ; VBITS_GE_256-NEXT: ret ; ; VBITS_GE_512-LABEL: umulh_v64i8: @@ -739,10 +734,9 @@ define void @umulh_v32i16(ptr %a, ptr %b) #0 { ; VBITS_GE_256-NEXT: ld1h { z2.h }, p0/z, [x0] ; VBITS_GE_256-NEXT: ld1h { z3.h }, p0/z, [x1] ; VBITS_GE_256-NEXT: umulh z0.h, p0/m, z0.h, z1.h -; VBITS_GE_256-NEXT: movprfx z1, z2 -; VBITS_GE_256-NEXT: umulh z1.h, p0/m, z1.h, z3.h +; VBITS_GE_256-NEXT: umulh z2.h, p0/m, z2.h, z3.h ; VBITS_GE_256-NEXT: st1h { z0.h }, p0, [x0, x8, lsl #1] -; VBITS_GE_256-NEXT: st1h { z1.h }, p0, [x0] +; VBITS_GE_256-NEXT: st1h { z2.h }, p0, [x0] ; VBITS_GE_256-NEXT: ret ; ; VBITS_GE_512-LABEL: umulh_v32i16: @@ -870,10 +864,9 @@ define void @umulh_v16i32(ptr %a, ptr %b) #0 { ; VBITS_GE_256-NEXT: ld1w { z2.s }, p0/z, [x0] ; VBITS_GE_256-NEXT: ld1w { z3.s }, p0/z, [x1] ; VBITS_GE_256-NEXT: umulh z0.s, p0/m, z0.s, z1.s -; VBITS_GE_256-NEXT: movprfx z1, z2 -; VBITS_GE_256-NEXT: umulh z1.s, p0/m, z1.s, z3.s +; VBITS_GE_256-NEXT: umulh z2.s, p0/m, z2.s, z3.s ; VBITS_GE_256-NEXT: st1w { z0.s }, p0, [x0, x8, lsl #2] -; VBITS_GE_256-NEXT: st1w { z1.s }, p0, [x0] +; VBITS_GE_256-NEXT: st1w { z2.s }, p0, [x0] ; VBITS_GE_256-NEXT: ret ; ; VBITS_GE_512-LABEL: umulh_v16i32: @@ -1001,10 +994,9 @@ define void @umulh_v8i64(ptr %a, ptr %b) #0 { ; VBITS_GE_256-NEXT: ld1d { z2.d }, p0/z, [x0] ; VBITS_GE_256-NEXT: ld1d { z3.d }, p0/z, [x1] ; VBITS_GE_256-NEXT: umulh z0.d, p0/m, z0.d, z1.d -; VBITS_GE_256-NEXT: movprfx z1, z2 -; VBITS_GE_256-NEXT: umulh z1.d, p0/m, z1.d, z3.d +; VBITS_GE_256-NEXT: umulh z2.d, p0/m, z2.d, z3.d ; VBITS_GE_256-NEXT: st1d { z0.d }, p0, [x0, x8, lsl #3] -; VBITS_GE_256-NEXT: st1d { z1.d }, p0, [x0] +; VBITS_GE_256-NEXT: st1d { z2.d }, p0, [x0] ; VBITS_GE_256-NEXT: ret ; ; VBITS_GE_512-LABEL: umulh_v8i64: diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-int-rem.ll 
b/llvm/test/CodeGen/AArch64/sve-fixed-length-int-rem.ll index 27be84419d59e..14204e965fb4d 100644 --- a/llvm/test/CodeGen/AArch64/sve-fixed-length-int-rem.ll +++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-int-rem.ll @@ -616,10 +616,9 @@ define void @srem_v16i32(ptr %a, ptr %b) #0 { ; VBITS_GE_256-NEXT: movprfx z5, z3 ; VBITS_GE_256-NEXT: sdiv z5.s, p0/m, z5.s, z4.s ; VBITS_GE_256-NEXT: mls z0.s, p0/m, z2.s, z1.s -; VBITS_GE_256-NEXT: movprfx z1, z3 -; VBITS_GE_256-NEXT: mls z1.s, p0/m, z5.s, z4.s +; VBITS_GE_256-NEXT: mls z3.s, p0/m, z5.s, z4.s ; VBITS_GE_256-NEXT: st1w { z0.s }, p0, [x0, x8, lsl #2] -; VBITS_GE_256-NEXT: st1w { z1.s }, p0, [x0] +; VBITS_GE_256-NEXT: st1w { z3.s }, p0, [x0] ; VBITS_GE_256-NEXT: ret ; ; VBITS_GE_512-LABEL: srem_v16i32: @@ -744,11 +743,10 @@ define void @srem_v8i64(ptr %a, ptr %b) #0 { ; VBITS_GE_128-NEXT: movprfx z18, z16 ; VBITS_GE_128-NEXT: sdiv z18.d, p0/m, z18.d, z17.d ; VBITS_GE_128-NEXT: msb z0.d, p0/m, z4.d, z1.d -; VBITS_GE_128-NEXT: movprfx z1, z2 -; VBITS_GE_128-NEXT: mls z1.d, p0/m, z19.d, z3.d +; VBITS_GE_128-NEXT: mls z2.d, p0/m, z19.d, z3.d ; VBITS_GE_128-NEXT: mls z16.d, p0/m, z18.d, z17.d ; VBITS_GE_128-NEXT: mls z5.d, p0/m, z7.d, z6.d -; VBITS_GE_128-NEXT: stp q0, q1, [x0] +; VBITS_GE_128-NEXT: stp q0, q2, [x0] ; VBITS_GE_128-NEXT: stp q16, q5, [x0, #32] ; VBITS_GE_128-NEXT: ret ; @@ -765,10 +763,9 @@ define void @srem_v8i64(ptr %a, ptr %b) #0 { ; VBITS_GE_256-NEXT: movprfx z5, z3 ; VBITS_GE_256-NEXT: sdiv z5.d, p0/m, z5.d, z4.d ; VBITS_GE_256-NEXT: mls z0.d, p0/m, z2.d, z1.d -; VBITS_GE_256-NEXT: movprfx z1, z3 -; VBITS_GE_256-NEXT: mls z1.d, p0/m, z5.d, z4.d +; VBITS_GE_256-NEXT: mls z3.d, p0/m, z5.d, z4.d ; VBITS_GE_256-NEXT: st1d { z0.d }, p0, [x0, x8, lsl #3] -; VBITS_GE_256-NEXT: st1d { z1.d }, p0, [x0] +; VBITS_GE_256-NEXT: st1d { z3.d }, p0, [x0] ; VBITS_GE_256-NEXT: ret ; ; VBITS_GE_512-LABEL: srem_v8i64: @@ -1434,10 +1431,9 @@ define void @urem_v16i32(ptr %a, ptr %b) #0 { ; VBITS_GE_256-NEXT: movprfx z5, z3 ; VBITS_GE_256-NEXT: udiv z5.s, p0/m, z5.s, z4.s ; VBITS_GE_256-NEXT: mls z0.s, p0/m, z2.s, z1.s -; VBITS_GE_256-NEXT: movprfx z1, z3 -; VBITS_GE_256-NEXT: mls z1.s, p0/m, z5.s, z4.s +; VBITS_GE_256-NEXT: mls z3.s, p0/m, z5.s, z4.s ; VBITS_GE_256-NEXT: st1w { z0.s }, p0, [x0, x8, lsl #2] -; VBITS_GE_256-NEXT: st1w { z1.s }, p0, [x0] +; VBITS_GE_256-NEXT: st1w { z3.s }, p0, [x0] ; VBITS_GE_256-NEXT: ret ; ; VBITS_GE_512-LABEL: urem_v16i32: @@ -1562,11 +1558,10 @@ define void @urem_v8i64(ptr %a, ptr %b) #0 { ; VBITS_GE_128-NEXT: movprfx z18, z16 ; VBITS_GE_128-NEXT: udiv z18.d, p0/m, z18.d, z17.d ; VBITS_GE_128-NEXT: msb z0.d, p0/m, z4.d, z1.d -; VBITS_GE_128-NEXT: movprfx z1, z2 -; VBITS_GE_128-NEXT: mls z1.d, p0/m, z19.d, z3.d +; VBITS_GE_128-NEXT: mls z2.d, p0/m, z19.d, z3.d ; VBITS_GE_128-NEXT: mls z16.d, p0/m, z18.d, z17.d ; VBITS_GE_128-NEXT: mls z5.d, p0/m, z7.d, z6.d -; VBITS_GE_128-NEXT: stp q0, q1, [x0] +; VBITS_GE_128-NEXT: stp q0, q2, [x0] ; VBITS_GE_128-NEXT: stp q16, q5, [x0, #32] ; VBITS_GE_128-NEXT: ret ; @@ -1583,10 +1578,9 @@ define void @urem_v8i64(ptr %a, ptr %b) #0 { ; VBITS_GE_256-NEXT: movprfx z5, z3 ; VBITS_GE_256-NEXT: udiv z5.d, p0/m, z5.d, z4.d ; VBITS_GE_256-NEXT: mls z0.d, p0/m, z2.d, z1.d -; VBITS_GE_256-NEXT: movprfx z1, z3 -; VBITS_GE_256-NEXT: mls z1.d, p0/m, z5.d, z4.d +; VBITS_GE_256-NEXT: mls z3.d, p0/m, z5.d, z4.d ; VBITS_GE_256-NEXT: st1d { z0.d }, p0, [x0, x8, lsl #3] -; VBITS_GE_256-NEXT: st1d { z1.d }, p0, [x0] +; VBITS_GE_256-NEXT: st1d { z3.d }, p0, [x0] ; VBITS_GE_256-NEXT: ret ; ; 
VBITS_GE_512-LABEL: urem_v8i64: diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-int-shifts.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-int-shifts.ll index 0fa8c8f50e29c..a8afa90df96e4 100644 --- a/llvm/test/CodeGen/AArch64/sve-fixed-length-int-shifts.ll +++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-int-shifts.ll @@ -57,10 +57,9 @@ define void @ashr_v64i8(ptr %a, ptr %b) #0 { ; VBITS_GE_256-NEXT: ld1b { z2.b }, p0/z, [x0] ; VBITS_GE_256-NEXT: ld1b { z3.b }, p0/z, [x1] ; VBITS_GE_256-NEXT: asr z0.b, p0/m, z0.b, z1.b -; VBITS_GE_256-NEXT: movprfx z1, z2 -; VBITS_GE_256-NEXT: asr z1.b, p0/m, z1.b, z3.b +; VBITS_GE_256-NEXT: asr z2.b, p0/m, z2.b, z3.b ; VBITS_GE_256-NEXT: st1b { z0.b }, p0, [x0, x8] -; VBITS_GE_256-NEXT: st1b { z1.b }, p0, [x0] +; VBITS_GE_256-NEXT: st1b { z2.b }, p0, [x0] ; VBITS_GE_256-NEXT: ret ; ; VBITS_GE_512-LABEL: ashr_v64i8: @@ -158,10 +157,9 @@ define void @ashr_v32i16(ptr %a, ptr %b) #0 { ; VBITS_GE_256-NEXT: ld1h { z2.h }, p0/z, [x0] ; VBITS_GE_256-NEXT: ld1h { z3.h }, p0/z, [x1] ; VBITS_GE_256-NEXT: asr z0.h, p0/m, z0.h, z1.h -; VBITS_GE_256-NEXT: movprfx z1, z2 -; VBITS_GE_256-NEXT: asr z1.h, p0/m, z1.h, z3.h +; VBITS_GE_256-NEXT: asr z2.h, p0/m, z2.h, z3.h ; VBITS_GE_256-NEXT: st1h { z0.h }, p0, [x0, x8, lsl #1] -; VBITS_GE_256-NEXT: st1h { z1.h }, p0, [x0] +; VBITS_GE_256-NEXT: st1h { z2.h }, p0, [x0] ; VBITS_GE_256-NEXT: ret ; ; VBITS_GE_512-LABEL: ashr_v32i16: @@ -259,10 +257,9 @@ define void @ashr_v16i32(ptr %a, ptr %b) #0 { ; VBITS_GE_256-NEXT: ld1w { z2.s }, p0/z, [x0] ; VBITS_GE_256-NEXT: ld1w { z3.s }, p0/z, [x1] ; VBITS_GE_256-NEXT: asr z0.s, p0/m, z0.s, z1.s -; VBITS_GE_256-NEXT: movprfx z1, z2 -; VBITS_GE_256-NEXT: asr z1.s, p0/m, z1.s, z3.s +; VBITS_GE_256-NEXT: asr z2.s, p0/m, z2.s, z3.s ; VBITS_GE_256-NEXT: st1w { z0.s }, p0, [x0, x8, lsl #2] -; VBITS_GE_256-NEXT: st1w { z1.s }, p0, [x0] +; VBITS_GE_256-NEXT: st1w { z2.s }, p0, [x0] ; VBITS_GE_256-NEXT: ret ; ; VBITS_GE_512-LABEL: ashr_v16i32: @@ -360,10 +357,9 @@ define void @ashr_v8i64(ptr %a, ptr %b) #0 { ; VBITS_GE_256-NEXT: ld1d { z2.d }, p0/z, [x0] ; VBITS_GE_256-NEXT: ld1d { z3.d }, p0/z, [x1] ; VBITS_GE_256-NEXT: asr z0.d, p0/m, z0.d, z1.d -; VBITS_GE_256-NEXT: movprfx z1, z2 -; VBITS_GE_256-NEXT: asr z1.d, p0/m, z1.d, z3.d +; VBITS_GE_256-NEXT: asr z2.d, p0/m, z2.d, z3.d ; VBITS_GE_256-NEXT: st1d { z0.d }, p0, [x0, x8, lsl #3] -; VBITS_GE_256-NEXT: st1d { z1.d }, p0, [x0] +; VBITS_GE_256-NEXT: st1d { z2.d }, p0, [x0] ; VBITS_GE_256-NEXT: ret ; ; VBITS_GE_512-LABEL: ashr_v8i64: @@ -465,10 +461,9 @@ define void @lshr_v64i8(ptr %a, ptr %b) #0 { ; VBITS_GE_256-NEXT: ld1b { z2.b }, p0/z, [x0] ; VBITS_GE_256-NEXT: ld1b { z3.b }, p0/z, [x1] ; VBITS_GE_256-NEXT: lsr z0.b, p0/m, z0.b, z1.b -; VBITS_GE_256-NEXT: movprfx z1, z2 -; VBITS_GE_256-NEXT: lsr z1.b, p0/m, z1.b, z3.b +; VBITS_GE_256-NEXT: lsr z2.b, p0/m, z2.b, z3.b ; VBITS_GE_256-NEXT: st1b { z0.b }, p0, [x0, x8] -; VBITS_GE_256-NEXT: st1b { z1.b }, p0, [x0] +; VBITS_GE_256-NEXT: st1b { z2.b }, p0, [x0] ; VBITS_GE_256-NEXT: ret ; ; VBITS_GE_512-LABEL: lshr_v64i8: @@ -566,10 +561,9 @@ define void @lshr_v32i16(ptr %a, ptr %b) #0 { ; VBITS_GE_256-NEXT: ld1h { z2.h }, p0/z, [x0] ; VBITS_GE_256-NEXT: ld1h { z3.h }, p0/z, [x1] ; VBITS_GE_256-NEXT: lsr z0.h, p0/m, z0.h, z1.h -; VBITS_GE_256-NEXT: movprfx z1, z2 -; VBITS_GE_256-NEXT: lsr z1.h, p0/m, z1.h, z3.h +; VBITS_GE_256-NEXT: lsr z2.h, p0/m, z2.h, z3.h ; VBITS_GE_256-NEXT: st1h { z0.h }, p0, [x0, x8, lsl #1] -; VBITS_GE_256-NEXT: st1h { z1.h }, p0, [x0] +; VBITS_GE_256-NEXT: st1h 
{ z2.h }, p0, [x0] ; VBITS_GE_256-NEXT: ret ; ; VBITS_GE_512-LABEL: lshr_v32i16: @@ -667,10 +661,9 @@ define void @lshr_v16i32(ptr %a, ptr %b) #0 { ; VBITS_GE_256-NEXT: ld1w { z2.s }, p0/z, [x0] ; VBITS_GE_256-NEXT: ld1w { z3.s }, p0/z, [x1] ; VBITS_GE_256-NEXT: lsr z0.s, p0/m, z0.s, z1.s -; VBITS_GE_256-NEXT: movprfx z1, z2 -; VBITS_GE_256-NEXT: lsr z1.s, p0/m, z1.s, z3.s +; VBITS_GE_256-NEXT: lsr z2.s, p0/m, z2.s, z3.s ; VBITS_GE_256-NEXT: st1w { z0.s }, p0, [x0, x8, lsl #2] -; VBITS_GE_256-NEXT: st1w { z1.s }, p0, [x0] +; VBITS_GE_256-NEXT: st1w { z2.s }, p0, [x0] ; VBITS_GE_256-NEXT: ret ; ; VBITS_GE_512-LABEL: lshr_v16i32: @@ -768,10 +761,9 @@ define void @lshr_v8i64(ptr %a, ptr %b) #0 { ; VBITS_GE_256-NEXT: ld1d { z2.d }, p0/z, [x0] ; VBITS_GE_256-NEXT: ld1d { z3.d }, p0/z, [x1] ; VBITS_GE_256-NEXT: lsr z0.d, p0/m, z0.d, z1.d -; VBITS_GE_256-NEXT: movprfx z1, z2 -; VBITS_GE_256-NEXT: lsr z1.d, p0/m, z1.d, z3.d +; VBITS_GE_256-NEXT: lsr z2.d, p0/m, z2.d, z3.d ; VBITS_GE_256-NEXT: st1d { z0.d }, p0, [x0, x8, lsl #3] -; VBITS_GE_256-NEXT: st1d { z1.d }, p0, [x0] +; VBITS_GE_256-NEXT: st1d { z2.d }, p0, [x0] ; VBITS_GE_256-NEXT: ret ; ; VBITS_GE_512-LABEL: lshr_v8i64: @@ -871,10 +863,9 @@ define void @shl_v64i8(ptr %a, ptr %b) #0 { ; VBITS_GE_256-NEXT: ld1b { z2.b }, p0/z, [x0] ; VBITS_GE_256-NEXT: ld1b { z3.b }, p0/z, [x1] ; VBITS_GE_256-NEXT: lsl z0.b, p0/m, z0.b, z1.b -; VBITS_GE_256-NEXT: movprfx z1, z2 -; VBITS_GE_256-NEXT: lsl z1.b, p0/m, z1.b, z3.b +; VBITS_GE_256-NEXT: lsl z2.b, p0/m, z2.b, z3.b ; VBITS_GE_256-NEXT: st1b { z0.b }, p0, [x0, x8] -; VBITS_GE_256-NEXT: st1b { z1.b }, p0, [x0] +; VBITS_GE_256-NEXT: st1b { z2.b }, p0, [x0] ; VBITS_GE_256-NEXT: ret ; ; VBITS_GE_512-LABEL: shl_v64i8: @@ -970,10 +961,9 @@ define void @shl_v32i16(ptr %a, ptr %b) #0 { ; VBITS_GE_256-NEXT: ld1h { z2.h }, p0/z, [x0] ; VBITS_GE_256-NEXT: ld1h { z3.h }, p0/z, [x1] ; VBITS_GE_256-NEXT: lsl z0.h, p0/m, z0.h, z1.h -; VBITS_GE_256-NEXT: movprfx z1, z2 -; VBITS_GE_256-NEXT: lsl z1.h, p0/m, z1.h, z3.h +; VBITS_GE_256-NEXT: lsl z2.h, p0/m, z2.h, z3.h ; VBITS_GE_256-NEXT: st1h { z0.h }, p0, [x0, x8, lsl #1] -; VBITS_GE_256-NEXT: st1h { z1.h }, p0, [x0] +; VBITS_GE_256-NEXT: st1h { z2.h }, p0, [x0] ; VBITS_GE_256-NEXT: ret ; ; VBITS_GE_512-LABEL: shl_v32i16: @@ -1069,10 +1059,9 @@ define void @shl_v16i32(ptr %a, ptr %b) #0 { ; VBITS_GE_256-NEXT: ld1w { z2.s }, p0/z, [x0] ; VBITS_GE_256-NEXT: ld1w { z3.s }, p0/z, [x1] ; VBITS_GE_256-NEXT: lsl z0.s, p0/m, z0.s, z1.s -; VBITS_GE_256-NEXT: movprfx z1, z2 -; VBITS_GE_256-NEXT: lsl z1.s, p0/m, z1.s, z3.s +; VBITS_GE_256-NEXT: lsl z2.s, p0/m, z2.s, z3.s ; VBITS_GE_256-NEXT: st1w { z0.s }, p0, [x0, x8, lsl #2] -; VBITS_GE_256-NEXT: st1w { z1.s }, p0, [x0] +; VBITS_GE_256-NEXT: st1w { z2.s }, p0, [x0] ; VBITS_GE_256-NEXT: ret ; ; VBITS_GE_512-LABEL: shl_v16i32: @@ -1168,10 +1157,9 @@ define void @shl_v8i64(ptr %a, ptr %b) #0 { ; VBITS_GE_256-NEXT: ld1d { z2.d }, p0/z, [x0] ; VBITS_GE_256-NEXT: ld1d { z3.d }, p0/z, [x1] ; VBITS_GE_256-NEXT: lsl z0.d, p0/m, z0.d, z1.d -; VBITS_GE_256-NEXT: movprfx z1, z2 -; VBITS_GE_256-NEXT: lsl z1.d, p0/m, z1.d, z3.d +; VBITS_GE_256-NEXT: lsl z2.d, p0/m, z2.d, z3.d ; VBITS_GE_256-NEXT: st1d { z0.d }, p0, [x0, x8, lsl #3] -; VBITS_GE_256-NEXT: st1d { z1.d }, p0, [x0] +; VBITS_GE_256-NEXT: st1d { z2.d }, p0, [x0] ; VBITS_GE_256-NEXT: ret ; ; VBITS_GE_512-LABEL: shl_v8i64: diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-arith.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-arith.ll index 
f2c882c370eab..20c06f0a1aff5 100644 --- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-arith.ll +++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-arith.ll @@ -193,9 +193,8 @@ define void @fadd_v16f16(ptr %a, ptr %b) { ; CHECK-NEXT: ptrue p0.h, vl8 ; CHECK-NEXT: ldp q1, q2, [x0] ; CHECK-NEXT: fadd z0.h, p0/m, z0.h, z1.h -; CHECK-NEXT: movprfx z1, z2 -; CHECK-NEXT: fadd z1.h, p0/m, z1.h, z3.h -; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: fadd z2.h, p0/m, z2.h, z3.h +; CHECK-NEXT: stp q0, q2, [x0] ; CHECK-NEXT: ret ; ; NONEON-NOSVE-LABEL: fadd_v16f16: @@ -397,9 +396,8 @@ define void @fadd_v8f32(ptr %a, ptr %b) { ; CHECK-NEXT: ptrue p0.s, vl4 ; CHECK-NEXT: ldp q1, q2, [x0] ; CHECK-NEXT: fadd z0.s, p0/m, z0.s, z1.s -; CHECK-NEXT: movprfx z1, z2 -; CHECK-NEXT: fadd z1.s, p0/m, z1.s, z3.s -; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: fadd z2.s, p0/m, z2.s, z3.s +; CHECK-NEXT: stp q0, q2, [x0] ; CHECK-NEXT: ret ; ; NONEON-NOSVE-LABEL: fadd_v8f32: @@ -479,9 +477,8 @@ define void @fadd_v4f64(ptr %a, ptr %b) { ; CHECK-NEXT: ptrue p0.d, vl2 ; CHECK-NEXT: ldp q1, q2, [x0] ; CHECK-NEXT: fadd z0.d, p0/m, z0.d, z1.d -; CHECK-NEXT: movprfx z1, z2 -; CHECK-NEXT: fadd z1.d, p0/m, z1.d, z3.d -; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: fadd z2.d, p0/m, z2.d, z3.d +; CHECK-NEXT: stp q0, q2, [x0] ; CHECK-NEXT: ret ; ; NONEON-NOSVE-LABEL: fadd_v4f64: @@ -703,9 +700,8 @@ define void @fdiv_v16f16(ptr %a, ptr %b) { ; CHECK-NEXT: ptrue p0.h, vl8 ; CHECK-NEXT: ldp q1, q2, [x0] ; CHECK-NEXT: fdivr z0.h, p0/m, z0.h, z1.h -; CHECK-NEXT: movprfx z1, z2 -; CHECK-NEXT: fdiv z1.h, p0/m, z1.h, z3.h -; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: fdiv z2.h, p0/m, z2.h, z3.h +; CHECK-NEXT: stp q0, q2, [x0] ; CHECK-NEXT: ret ; ; NONEON-NOSVE-LABEL: fdiv_v16f16: @@ -907,9 +903,8 @@ define void @fdiv_v8f32(ptr %a, ptr %b) { ; CHECK-NEXT: ptrue p0.s, vl4 ; CHECK-NEXT: ldp q1, q2, [x0] ; CHECK-NEXT: fdivr z0.s, p0/m, z0.s, z1.s -; CHECK-NEXT: movprfx z1, z2 -; CHECK-NEXT: fdiv z1.s, p0/m, z1.s, z3.s -; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: fdiv z2.s, p0/m, z2.s, z3.s +; CHECK-NEXT: stp q0, q2, [x0] ; CHECK-NEXT: ret ; ; NONEON-NOSVE-LABEL: fdiv_v8f32: @@ -989,9 +984,8 @@ define void @fdiv_v4f64(ptr %a, ptr %b) { ; CHECK-NEXT: ptrue p0.d, vl2 ; CHECK-NEXT: ldp q1, q2, [x0] ; CHECK-NEXT: fdivr z0.d, p0/m, z0.d, z1.d -; CHECK-NEXT: movprfx z1, z2 -; CHECK-NEXT: fdiv z1.d, p0/m, z1.d, z3.d -; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: fdiv z2.d, p0/m, z2.d, z3.d +; CHECK-NEXT: stp q0, q2, [x0] ; CHECK-NEXT: ret ; ; NONEON-NOSVE-LABEL: fdiv_v4f64: @@ -1253,9 +1247,8 @@ define void @fma_v16f16(ptr %a, ptr %b, ptr %c) { ; CHECK-NEXT: ldp q1, q5, [x2] ; CHECK-NEXT: ldp q2, q3, [x0] ; CHECK-NEXT: fmad z0.h, p0/m, z2.h, z1.h -; CHECK-NEXT: movprfx z1, z5 -; CHECK-NEXT: fmla z1.h, p0/m, z3.h, z4.h -; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: fmad z3.h, p0/m, z4.h, z5.h +; CHECK-NEXT: stp q0, q3, [x0] ; CHECK-NEXT: ret ; ; NONEON-NOSVE-LABEL: fma_v16f16: @@ -1501,9 +1494,8 @@ define void @fma_v8f32(ptr %a, ptr %b, ptr %c) { ; CHECK-NEXT: ldp q1, q5, [x2] ; CHECK-NEXT: ldp q2, q3, [x0] ; CHECK-NEXT: fmad z0.s, p0/m, z2.s, z1.s -; CHECK-NEXT: movprfx z1, z5 -; CHECK-NEXT: fmla z1.s, p0/m, z3.s, z4.s -; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: fmad z3.s, p0/m, z4.s, z5.s +; CHECK-NEXT: stp q0, q3, [x0] ; CHECK-NEXT: ret ; ; NONEON-NOSVE-LABEL: fma_v8f32: @@ -1595,9 +1587,8 @@ define void @fma_v4f64(ptr %a, ptr %b, ptr %c) { ; CHECK-NEXT: ldp q1, q5, [x2] ; CHECK-NEXT: ldp q2, q3, [x0] ; CHECK-NEXT: 
fmad z0.d, p0/m, z2.d, z1.d -; CHECK-NEXT: movprfx z1, z5 -; CHECK-NEXT: fmla z1.d, p0/m, z3.d, z4.d -; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: fmad z3.d, p0/m, z4.d, z5.d +; CHECK-NEXT: stp q0, q3, [x0] ; CHECK-NEXT: ret ; ; NONEON-NOSVE-LABEL: fma_v4f64: @@ -1824,9 +1815,8 @@ define void @fmul_v16f16(ptr %a, ptr %b) { ; CHECK-NEXT: ptrue p0.h, vl8 ; CHECK-NEXT: ldp q1, q2, [x0] ; CHECK-NEXT: fmul z0.h, p0/m, z0.h, z1.h -; CHECK-NEXT: movprfx z1, z2 -; CHECK-NEXT: fmul z1.h, p0/m, z1.h, z3.h -; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: fmul z2.h, p0/m, z2.h, z3.h +; CHECK-NEXT: stp q0, q2, [x0] ; CHECK-NEXT: ret ; ; NONEON-NOSVE-LABEL: fmul_v16f16: @@ -2028,9 +2018,8 @@ define void @fmul_v8f32(ptr %a, ptr %b) { ; CHECK-NEXT: ptrue p0.s, vl4 ; CHECK-NEXT: ldp q1, q2, [x0] ; CHECK-NEXT: fmul z0.s, p0/m, z0.s, z1.s -; CHECK-NEXT: movprfx z1, z2 -; CHECK-NEXT: fmul z1.s, p0/m, z1.s, z3.s -; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: fmul z2.s, p0/m, z2.s, z3.s +; CHECK-NEXT: stp q0, q2, [x0] ; CHECK-NEXT: ret ; ; NONEON-NOSVE-LABEL: fmul_v8f32: @@ -2110,9 +2099,8 @@ define void @fmul_v4f64(ptr %a, ptr %b) { ; CHECK-NEXT: ptrue p0.d, vl2 ; CHECK-NEXT: ldp q1, q2, [x0] ; CHECK-NEXT: fmul z0.d, p0/m, z0.d, z1.d -; CHECK-NEXT: movprfx z1, z2 -; CHECK-NEXT: fmul z1.d, p0/m, z1.d, z3.d -; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: fmul z2.d, p0/m, z2.d, z3.d +; CHECK-NEXT: stp q0, q2, [x0] ; CHECK-NEXT: ret ; ; NONEON-NOSVE-LABEL: fmul_v4f64: @@ -3152,9 +3140,8 @@ define void @fsub_v16f16(ptr %a, ptr %b) { ; CHECK-NEXT: ptrue p0.h, vl8 ; CHECK-NEXT: ldp q1, q2, [x0] ; CHECK-NEXT: fsubr z0.h, p0/m, z0.h, z1.h -; CHECK-NEXT: movprfx z1, z2 -; CHECK-NEXT: fsub z1.h, p0/m, z1.h, z3.h -; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: fsub z2.h, p0/m, z2.h, z3.h +; CHECK-NEXT: stp q0, q2, [x0] ; CHECK-NEXT: ret ; ; NONEON-NOSVE-LABEL: fsub_v16f16: @@ -3356,9 +3343,8 @@ define void @fsub_v8f32(ptr %a, ptr %b) { ; CHECK-NEXT: ptrue p0.s, vl4 ; CHECK-NEXT: ldp q1, q2, [x0] ; CHECK-NEXT: fsubr z0.s, p0/m, z0.s, z1.s -; CHECK-NEXT: movprfx z1, z2 -; CHECK-NEXT: fsub z1.s, p0/m, z1.s, z3.s -; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: fsub z2.s, p0/m, z2.s, z3.s +; CHECK-NEXT: stp q0, q2, [x0] ; CHECK-NEXT: ret ; ; NONEON-NOSVE-LABEL: fsub_v8f32: @@ -3438,9 +3424,8 @@ define void @fsub_v4f64(ptr %a, ptr %b) { ; CHECK-NEXT: ptrue p0.d, vl2 ; CHECK-NEXT: ldp q1, q2, [x0] ; CHECK-NEXT: fsubr z0.d, p0/m, z0.d, z1.d -; CHECK-NEXT: movprfx z1, z2 -; CHECK-NEXT: fsub z1.d, p0/m, z1.d, z3.d -; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: fsub z2.d, p0/m, z2.d, z3.d +; CHECK-NEXT: stp q0, q2, [x0] ; CHECK-NEXT: ret ; ; NONEON-NOSVE-LABEL: fsub_v4f64: diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-fma.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-fma.ll index 680cb4fb0a791..dbacd77315198 100644 --- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-fma.ll +++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-fma.ll @@ -208,9 +208,8 @@ define void @fma_v16f16(ptr %a, ptr %b, ptr %c) { ; CHECK-NEXT: ldp q1, q5, [x2] ; CHECK-NEXT: ldp q2, q3, [x0] ; CHECK-NEXT: fmad z0.h, p0/m, z2.h, z1.h -; CHECK-NEXT: movprfx z1, z5 -; CHECK-NEXT: fmla z1.h, p0/m, z3.h, z4.h -; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: fmad z3.h, p0/m, z4.h, z5.h +; CHECK-NEXT: stp q0, q3, [x0] ; CHECK-NEXT: ret ; ; NONEON-NOSVE-LABEL: fma_v16f16: @@ -526,9 +525,8 @@ define void @fma_v8f32(ptr %a, ptr %b, ptr %c) { ; CHECK-NEXT: ldp q1, q5, [x2] ; CHECK-NEXT: ldp q2, q3, [x0] 
; CHECK-NEXT: fmad z0.s, p0/m, z2.s, z1.s -; CHECK-NEXT: movprfx z1, z5 -; CHECK-NEXT: fmla z1.s, p0/m, z3.s, z4.s -; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: fmad z3.s, p0/m, z4.s, z5.s +; CHECK-NEXT: stp q0, q3, [x0] ; CHECK-NEXT: ret ; ; NONEON-NOSVE-LABEL: fma_v8f32: @@ -642,9 +640,8 @@ define void @fma_v4f64(ptr %a, ptr %b, ptr %c) { ; CHECK-NEXT: ldp q1, q5, [x2] ; CHECK-NEXT: ldp q2, q3, [x0] ; CHECK-NEXT: fmad z0.d, p0/m, z2.d, z1.d -; CHECK-NEXT: movprfx z1, z5 -; CHECK-NEXT: fmla z1.d, p0/m, z3.d, z4.d -; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: fmad z3.d, p0/m, z4.d, z5.d +; CHECK-NEXT: stp q0, q3, [x0] ; CHECK-NEXT: ret ; ; NONEON-NOSVE-LABEL: fma_v4f64: diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-minmax.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-minmax.ll index 84aea185917fa..e53d6a9081154 100644 --- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-minmax.ll +++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-minmax.ll @@ -143,9 +143,8 @@ define void @fmaxnm_v16f16(ptr %a, ptr %b) { ; CHECK-NEXT: ptrue p0.h, vl8 ; CHECK-NEXT: ldp q1, q2, [x0] ; CHECK-NEXT: fmaxnm z0.h, p0/m, z0.h, z1.h -; CHECK-NEXT: movprfx z1, z2 -; CHECK-NEXT: fmaxnm z1.h, p0/m, z1.h, z3.h -; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: fmaxnm z2.h, p0/m, z2.h, z3.h +; CHECK-NEXT: stp q0, q2, [x0] ; CHECK-NEXT: ret ; ; NONEON-NOSVE-LABEL: fmaxnm_v16f16: @@ -347,9 +346,8 @@ define void @fmaxnm_v8f32(ptr %a, ptr %b) { ; CHECK-NEXT: ptrue p0.s, vl4 ; CHECK-NEXT: ldp q1, q2, [x0] ; CHECK-NEXT: fmaxnm z0.s, p0/m, z0.s, z1.s -; CHECK-NEXT: movprfx z1, z2 -; CHECK-NEXT: fmaxnm z1.s, p0/m, z1.s, z3.s -; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: fmaxnm z2.s, p0/m, z2.s, z3.s +; CHECK-NEXT: stp q0, q2, [x0] ; CHECK-NEXT: ret ; ; NONEON-NOSVE-LABEL: fmaxnm_v8f32: @@ -448,9 +446,8 @@ define void @fmaxnm_v4f64(ptr %a, ptr %b) { ; CHECK-NEXT: ptrue p0.d, vl2 ; CHECK-NEXT: ldp q1, q2, [x0] ; CHECK-NEXT: fmaxnm z0.d, p0/m, z0.d, z1.d -; CHECK-NEXT: movprfx z1, z2 -; CHECK-NEXT: fmaxnm z1.d, p0/m, z1.d, z3.d -; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: fmaxnm z2.d, p0/m, z2.d, z3.d +; CHECK-NEXT: stp q0, q2, [x0] ; CHECK-NEXT: ret ; ; NONEON-NOSVE-LABEL: fmaxnm_v4f64: @@ -622,9 +619,8 @@ define void @fminnm_v16f16(ptr %a, ptr %b) { ; CHECK-NEXT: ptrue p0.h, vl8 ; CHECK-NEXT: ldp q1, q2, [x0] ; CHECK-NEXT: fminnm z0.h, p0/m, z0.h, z1.h -; CHECK-NEXT: movprfx z1, z2 -; CHECK-NEXT: fminnm z1.h, p0/m, z1.h, z3.h -; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: fminnm z2.h, p0/m, z2.h, z3.h +; CHECK-NEXT: stp q0, q2, [x0] ; CHECK-NEXT: ret ; ; NONEON-NOSVE-LABEL: fminnm_v16f16: @@ -826,9 +822,8 @@ define void @fminnm_v8f32(ptr %a, ptr %b) { ; CHECK-NEXT: ptrue p0.s, vl4 ; CHECK-NEXT: ldp q1, q2, [x0] ; CHECK-NEXT: fminnm z0.s, p0/m, z0.s, z1.s -; CHECK-NEXT: movprfx z1, z2 -; CHECK-NEXT: fminnm z1.s, p0/m, z1.s, z3.s -; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: fminnm z2.s, p0/m, z2.s, z3.s +; CHECK-NEXT: stp q0, q2, [x0] ; CHECK-NEXT: ret ; ; NONEON-NOSVE-LABEL: fminnm_v8f32: @@ -927,9 +922,8 @@ define void @fminnm_v4f64(ptr %a, ptr %b) { ; CHECK-NEXT: ptrue p0.d, vl2 ; CHECK-NEXT: ldp q1, q2, [x0] ; CHECK-NEXT: fminnm z0.d, p0/m, z0.d, z1.d -; CHECK-NEXT: movprfx z1, z2 -; CHECK-NEXT: fminnm z1.d, p0/m, z1.d, z3.d -; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: fminnm z2.d, p0/m, z2.d, z3.d +; CHECK-NEXT: stp q0, q2, [x0] ; CHECK-NEXT: ret ; ; NONEON-NOSVE-LABEL: fminnm_v4f64: @@ -1101,9 +1095,8 @@ define void @fmax_v16f16(ptr %a, ptr %b) 
{ ; CHECK-NEXT: ptrue p0.h, vl8 ; CHECK-NEXT: ldp q1, q2, [x0] ; CHECK-NEXT: fmax z0.h, p0/m, z0.h, z1.h -; CHECK-NEXT: movprfx z1, z2 -; CHECK-NEXT: fmax z1.h, p0/m, z1.h, z3.h -; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: fmax z2.h, p0/m, z2.h, z3.h +; CHECK-NEXT: stp q0, q2, [x0] ; CHECK-NEXT: ret ; ; NONEON-NOSVE-LABEL: fmax_v16f16: @@ -1305,9 +1298,8 @@ define void @fmax_v8f32(ptr %a, ptr %b) { ; CHECK-NEXT: ptrue p0.s, vl4 ; CHECK-NEXT: ldp q1, q2, [x0] ; CHECK-NEXT: fmax z0.s, p0/m, z0.s, z1.s -; CHECK-NEXT: movprfx z1, z2 -; CHECK-NEXT: fmax z1.s, p0/m, z1.s, z3.s -; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: fmax z2.s, p0/m, z2.s, z3.s +; CHECK-NEXT: stp q0, q2, [x0] ; CHECK-NEXT: ret ; ; NONEON-NOSVE-LABEL: fmax_v8f32: @@ -1406,9 +1398,8 @@ define void @fmax_v4f64(ptr %a, ptr %b) { ; CHECK-NEXT: ptrue p0.d, vl2 ; CHECK-NEXT: ldp q1, q2, [x0] ; CHECK-NEXT: fmax z0.d, p0/m, z0.d, z1.d -; CHECK-NEXT: movprfx z1, z2 -; CHECK-NEXT: fmax z1.d, p0/m, z1.d, z3.d -; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: fmax z2.d, p0/m, z2.d, z3.d +; CHECK-NEXT: stp q0, q2, [x0] ; CHECK-NEXT: ret ; ; NONEON-NOSVE-LABEL: fmax_v4f64: @@ -1580,9 +1571,8 @@ define void @fmin_v16f16(ptr %a, ptr %b) { ; CHECK-NEXT: ptrue p0.h, vl8 ; CHECK-NEXT: ldp q1, q2, [x0] ; CHECK-NEXT: fmin z0.h, p0/m, z0.h, z1.h -; CHECK-NEXT: movprfx z1, z2 -; CHECK-NEXT: fmin z1.h, p0/m, z1.h, z3.h -; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: fmin z2.h, p0/m, z2.h, z3.h +; CHECK-NEXT: stp q0, q2, [x0] ; CHECK-NEXT: ret ; ; NONEON-NOSVE-LABEL: fmin_v16f16: @@ -1784,9 +1774,8 @@ define void @fmin_v8f32(ptr %a, ptr %b) { ; CHECK-NEXT: ptrue p0.s, vl4 ; CHECK-NEXT: ldp q1, q2, [x0] ; CHECK-NEXT: fmin z0.s, p0/m, z0.s, z1.s -; CHECK-NEXT: movprfx z1, z2 -; CHECK-NEXT: fmin z1.s, p0/m, z1.s, z3.s -; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: fmin z2.s, p0/m, z2.s, z3.s +; CHECK-NEXT: stp q0, q2, [x0] ; CHECK-NEXT: ret ; ; NONEON-NOSVE-LABEL: fmin_v8f32: @@ -1885,9 +1874,8 @@ define void @fmin_v4f64(ptr %a, ptr %b) { ; CHECK-NEXT: ptrue p0.d, vl2 ; CHECK-NEXT: ldp q1, q2, [x0] ; CHECK-NEXT: fmin z0.d, p0/m, z0.d, z1.d -; CHECK-NEXT: movprfx z1, z2 -; CHECK-NEXT: fmin z1.d, p0/m, z1.d, z3.d -; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: fmin z2.d, p0/m, z2.d, z3.d +; CHECK-NEXT: stp q0, q2, [x0] ; CHECK-NEXT: ret ; ; NONEON-NOSVE-LABEL: fmin_v4f64: diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-arith.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-arith.ll index 4360f3a12014a..02b5469c0ff85 100644 --- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-arith.ll +++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-arith.ll @@ -975,9 +975,8 @@ define void @mul_v32i8(ptr %a, ptr %b) { ; SVE-NEXT: ptrue p0.b, vl16 ; SVE-NEXT: ldp q1, q2, [x0] ; SVE-NEXT: mul z0.b, p0/m, z0.b, z1.b -; SVE-NEXT: movprfx z1, z2 -; SVE-NEXT: mul z1.b, p0/m, z1.b, z3.b -; SVE-NEXT: stp q0, q1, [x0] +; SVE-NEXT: mul z2.b, p0/m, z2.b, z3.b +; SVE-NEXT: stp q0, q2, [x0] ; SVE-NEXT: ret ; ; SVE2-LABEL: mul_v32i8: @@ -1286,9 +1285,8 @@ define void @mul_v16i16(ptr %a, ptr %b) { ; SVE-NEXT: ptrue p0.h, vl8 ; SVE-NEXT: ldp q1, q2, [x0] ; SVE-NEXT: mul z0.h, p0/m, z0.h, z1.h -; SVE-NEXT: movprfx z1, z2 -; SVE-NEXT: mul z1.h, p0/m, z1.h, z3.h -; SVE-NEXT: stp q0, q1, [x0] +; SVE-NEXT: mul z2.h, p0/m, z2.h, z3.h +; SVE-NEXT: stp q0, q2, [x0] ; SVE-NEXT: ret ; ; SVE2-LABEL: mul_v16i16: @@ -1467,9 +1465,8 @@ define void @mul_v8i32(ptr %a, ptr %b) { ; SVE-NEXT: ptrue p0.s, vl4 ; SVE-NEXT: ldp q1, 
q2, [x0] ; SVE-NEXT: mul z0.s, p0/m, z0.s, z1.s -; SVE-NEXT: movprfx z1, z2 -; SVE-NEXT: mul z1.s, p0/m, z1.s, z3.s -; SVE-NEXT: stp q0, q1, [x0] +; SVE-NEXT: mul z2.s, p0/m, z2.s, z3.s +; SVE-NEXT: stp q0, q2, [x0] ; SVE-NEXT: ret ; ; SVE2-LABEL: mul_v8i32: @@ -1599,9 +1596,8 @@ define void @mul_v4i64(ptr %a, ptr %b) { ; SVE-NEXT: ptrue p0.d, vl2 ; SVE-NEXT: ldp q1, q2, [x0] ; SVE-NEXT: mul z0.d, p0/m, z0.d, z1.d -; SVE-NEXT: movprfx z1, z2 -; SVE-NEXT: mul z1.d, p0/m, z1.d, z3.d -; SVE-NEXT: stp q0, q1, [x0] +; SVE-NEXT: mul z2.d, p0/m, z2.d, z3.d +; SVE-NEXT: stp q0, q2, [x0] ; SVE-NEXT: ret ; ; SVE2-LABEL: mul_v4i64: diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-div.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-div.ll index 1fdcd4f826870..8e1d61b51e2bb 100644 --- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-div.ll +++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-div.ll @@ -779,9 +779,8 @@ define void @sdiv_v8i32(ptr %a, ptr %b) { ; CHECK-NEXT: ptrue p0.s, vl4 ; CHECK-NEXT: ldp q1, q2, [x0] ; CHECK-NEXT: sdivr z0.s, p0/m, z0.s, z1.s -; CHECK-NEXT: movprfx z1, z2 -; CHECK-NEXT: sdiv z1.s, p0/m, z1.s, z3.s -; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: sdiv z2.s, p0/m, z2.s, z3.s +; CHECK-NEXT: stp q0, q2, [x0] ; CHECK-NEXT: ret ; ; NONEON-NOSVE-LABEL: sdiv_v8i32: @@ -886,9 +885,8 @@ define void @sdiv_v4i64(ptr %a, ptr %b) { ; CHECK-NEXT: ptrue p0.d, vl2 ; CHECK-NEXT: ldp q1, q2, [x0] ; CHECK-NEXT: sdivr z0.d, p0/m, z0.d, z1.d -; CHECK-NEXT: movprfx z1, z2 -; CHECK-NEXT: sdiv z1.d, p0/m, z1.d, z3.d -; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: sdiv z2.d, p0/m, z2.d, z3.d +; CHECK-NEXT: stp q0, q2, [x0] ; CHECK-NEXT: ret ; ; NONEON-NOSVE-LABEL: sdiv_v4i64: @@ -1693,9 +1691,8 @@ define void @udiv_v8i32(ptr %a, ptr %b) { ; CHECK-NEXT: ptrue p0.s, vl4 ; CHECK-NEXT: ldp q1, q2, [x0] ; CHECK-NEXT: udivr z0.s, p0/m, z0.s, z1.s -; CHECK-NEXT: movprfx z1, z2 -; CHECK-NEXT: udiv z1.s, p0/m, z1.s, z3.s -; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: udiv z2.s, p0/m, z2.s, z3.s +; CHECK-NEXT: stp q0, q2, [x0] ; CHECK-NEXT: ret ; ; NONEON-NOSVE-LABEL: udiv_v8i32: @@ -1800,9 +1797,8 @@ define void @udiv_v4i64(ptr %a, ptr %b) { ; CHECK-NEXT: ptrue p0.d, vl2 ; CHECK-NEXT: ldp q1, q2, [x0] ; CHECK-NEXT: udivr z0.d, p0/m, z0.d, z1.d -; CHECK-NEXT: movprfx z1, z2 -; CHECK-NEXT: udiv z1.d, p0/m, z1.d, z3.d -; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: udiv z2.d, p0/m, z2.d, z3.d +; CHECK-NEXT: stp q0, q2, [x0] ; CHECK-NEXT: ret ; ; NONEON-NOSVE-LABEL: udiv_v4i64: diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-minmax.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-minmax.ll index 1bca7dd09d9b7..d858d8171926e 100644 --- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-minmax.ll +++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-minmax.ll @@ -179,9 +179,8 @@ define void @smax_v32i8(ptr %a, ptr %b) { ; CHECK-NEXT: ptrue p0.b, vl16 ; CHECK-NEXT: ldp q1, q2, [x0] ; CHECK-NEXT: smax z0.b, p0/m, z0.b, z1.b -; CHECK-NEXT: movprfx z1, z2 -; CHECK-NEXT: smax z1.b, p0/m, z1.b, z3.b -; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: smax z2.b, p0/m, z2.b, z3.b +; CHECK-NEXT: stp q0, q2, [x0] ; CHECK-NEXT: ret ; ; NONEON-NOSVE-LABEL: smax_v32i8: @@ -473,9 +472,8 @@ define void @smax_v16i16(ptr %a, ptr %b) { ; CHECK-NEXT: ptrue p0.h, vl8 ; CHECK-NEXT: ldp q1, q2, [x0] ; CHECK-NEXT: smax z0.h, p0/m, z0.h, z1.h -; CHECK-NEXT: movprfx z1, z2 -; CHECK-NEXT: smax 
z1.h, p0/m, z1.h, z3.h -; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: smax z2.h, p0/m, z2.h, z3.h +; CHECK-NEXT: stp q0, q2, [x0] ; CHECK-NEXT: ret ; ; NONEON-NOSVE-LABEL: smax_v16i16: @@ -651,9 +649,8 @@ define void @smax_v8i32(ptr %a, ptr %b) { ; CHECK-NEXT: ptrue p0.s, vl4 ; CHECK-NEXT: ldp q1, q2, [x0] ; CHECK-NEXT: smax z0.s, p0/m, z0.s, z1.s -; CHECK-NEXT: movprfx z1, z2 -; CHECK-NEXT: smax z1.s, p0/m, z1.s, z3.s -; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: smax z2.s, p0/m, z2.s, z3.s +; CHECK-NEXT: stp q0, q2, [x0] ; CHECK-NEXT: ret ; ; NONEON-NOSVE-LABEL: smax_v8i32: @@ -771,9 +768,8 @@ define void @smax_v4i64(ptr %a, ptr %b) { ; CHECK-NEXT: ptrue p0.d, vl2 ; CHECK-NEXT: ldp q1, q2, [x0] ; CHECK-NEXT: smax z0.d, p0/m, z0.d, z1.d -; CHECK-NEXT: movprfx z1, z2 -; CHECK-NEXT: smax z1.d, p0/m, z1.d, z3.d -; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: smax z2.d, p0/m, z2.d, z3.d +; CHECK-NEXT: stp q0, q2, [x0] ; CHECK-NEXT: ret ; ; NONEON-NOSVE-LABEL: smax_v4i64: @@ -985,9 +981,8 @@ define void @smin_v32i8(ptr %a, ptr %b) { ; CHECK-NEXT: ptrue p0.b, vl16 ; CHECK-NEXT: ldp q1, q2, [x0] ; CHECK-NEXT: smin z0.b, p0/m, z0.b, z1.b -; CHECK-NEXT: movprfx z1, z2 -; CHECK-NEXT: smin z1.b, p0/m, z1.b, z3.b -; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: smin z2.b, p0/m, z2.b, z3.b +; CHECK-NEXT: stp q0, q2, [x0] ; CHECK-NEXT: ret ; ; NONEON-NOSVE-LABEL: smin_v32i8: @@ -1279,9 +1274,8 @@ define void @smin_v16i16(ptr %a, ptr %b) { ; CHECK-NEXT: ptrue p0.h, vl8 ; CHECK-NEXT: ldp q1, q2, [x0] ; CHECK-NEXT: smin z0.h, p0/m, z0.h, z1.h -; CHECK-NEXT: movprfx z1, z2 -; CHECK-NEXT: smin z1.h, p0/m, z1.h, z3.h -; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: smin z2.h, p0/m, z2.h, z3.h +; CHECK-NEXT: stp q0, q2, [x0] ; CHECK-NEXT: ret ; ; NONEON-NOSVE-LABEL: smin_v16i16: @@ -1457,9 +1451,8 @@ define void @smin_v8i32(ptr %a, ptr %b) { ; CHECK-NEXT: ptrue p0.s, vl4 ; CHECK-NEXT: ldp q1, q2, [x0] ; CHECK-NEXT: smin z0.s, p0/m, z0.s, z1.s -; CHECK-NEXT: movprfx z1, z2 -; CHECK-NEXT: smin z1.s, p0/m, z1.s, z3.s -; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: smin z2.s, p0/m, z2.s, z3.s +; CHECK-NEXT: stp q0, q2, [x0] ; CHECK-NEXT: ret ; ; NONEON-NOSVE-LABEL: smin_v8i32: @@ -1577,9 +1570,8 @@ define void @smin_v4i64(ptr %a, ptr %b) { ; CHECK-NEXT: ptrue p0.d, vl2 ; CHECK-NEXT: ldp q1, q2, [x0] ; CHECK-NEXT: smin z0.d, p0/m, z0.d, z1.d -; CHECK-NEXT: movprfx z1, z2 -; CHECK-NEXT: smin z1.d, p0/m, z1.d, z3.d -; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: smin z2.d, p0/m, z2.d, z3.d +; CHECK-NEXT: stp q0, q2, [x0] ; CHECK-NEXT: ret ; ; NONEON-NOSVE-LABEL: smin_v4i64: @@ -1791,9 +1783,8 @@ define void @umax_v32i8(ptr %a, ptr %b) { ; CHECK-NEXT: ptrue p0.b, vl16 ; CHECK-NEXT: ldp q1, q2, [x0] ; CHECK-NEXT: umax z0.b, p0/m, z0.b, z1.b -; CHECK-NEXT: movprfx z1, z2 -; CHECK-NEXT: umax z1.b, p0/m, z1.b, z3.b -; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: umax z2.b, p0/m, z2.b, z3.b +; CHECK-NEXT: stp q0, q2, [x0] ; CHECK-NEXT: ret ; ; NONEON-NOSVE-LABEL: umax_v32i8: @@ -2085,9 +2076,8 @@ define void @umax_v16i16(ptr %a, ptr %b) { ; CHECK-NEXT: ptrue p0.h, vl8 ; CHECK-NEXT: ldp q1, q2, [x0] ; CHECK-NEXT: umax z0.h, p0/m, z0.h, z1.h -; CHECK-NEXT: movprfx z1, z2 -; CHECK-NEXT: umax z1.h, p0/m, z1.h, z3.h -; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: umax z2.h, p0/m, z2.h, z3.h +; CHECK-NEXT: stp q0, q2, [x0] ; CHECK-NEXT: ret ; ; NONEON-NOSVE-LABEL: umax_v16i16: @@ -2263,9 +2253,8 @@ define void @umax_v8i32(ptr %a, ptr %b) { ; CHECK-NEXT: ptrue p0.s, vl4 ; CHECK-NEXT: ldp q1, q2, [x0] ; CHECK-NEXT: umax z0.s, p0/m, z0.s, 
z1.s -; CHECK-NEXT: movprfx z1, z2 -; CHECK-NEXT: umax z1.s, p0/m, z1.s, z3.s -; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: umax z2.s, p0/m, z2.s, z3.s +; CHECK-NEXT: stp q0, q2, [x0] ; CHECK-NEXT: ret ; ; NONEON-NOSVE-LABEL: umax_v8i32: @@ -2383,9 +2372,8 @@ define void @umax_v4i64(ptr %a, ptr %b) { ; CHECK-NEXT: ptrue p0.d, vl2 ; CHECK-NEXT: ldp q1, q2, [x0] ; CHECK-NEXT: umax z0.d, p0/m, z0.d, z1.d -; CHECK-NEXT: movprfx z1, z2 -; CHECK-NEXT: umax z1.d, p0/m, z1.d, z3.d -; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: umax z2.d, p0/m, z2.d, z3.d +; CHECK-NEXT: stp q0, q2, [x0] ; CHECK-NEXT: ret ; ; NONEON-NOSVE-LABEL: umax_v4i64: @@ -2597,9 +2585,8 @@ define void @umin_v32i8(ptr %a, ptr %b) { ; CHECK-NEXT: ptrue p0.b, vl16 ; CHECK-NEXT: ldp q1, q2, [x0] ; CHECK-NEXT: umin z0.b, p0/m, z0.b, z1.b -; CHECK-NEXT: movprfx z1, z2 -; CHECK-NEXT: umin z1.b, p0/m, z1.b, z3.b -; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: umin z2.b, p0/m, z2.b, z3.b +; CHECK-NEXT: stp q0, q2, [x0] ; CHECK-NEXT: ret ; ; NONEON-NOSVE-LABEL: umin_v32i8: @@ -2891,9 +2878,8 @@ define void @umin_v16i16(ptr %a, ptr %b) { ; CHECK-NEXT: ptrue p0.h, vl8 ; CHECK-NEXT: ldp q1, q2, [x0] ; CHECK-NEXT: umin z0.h, p0/m, z0.h, z1.h -; CHECK-NEXT: movprfx z1, z2 -; CHECK-NEXT: umin z1.h, p0/m, z1.h, z3.h -; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: umin z2.h, p0/m, z2.h, z3.h +; CHECK-NEXT: stp q0, q2, [x0] ; CHECK-NEXT: ret ; ; NONEON-NOSVE-LABEL: umin_v16i16: @@ -3069,9 +3055,8 @@ define void @umin_v8i32(ptr %a, ptr %b) { ; CHECK-NEXT: ptrue p0.s, vl4 ; CHECK-NEXT: ldp q1, q2, [x0] ; CHECK-NEXT: umin z0.s, p0/m, z0.s, z1.s -; CHECK-NEXT: movprfx z1, z2 -; CHECK-NEXT: umin z1.s, p0/m, z1.s, z3.s -; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: umin z2.s, p0/m, z2.s, z3.s +; CHECK-NEXT: stp q0, q2, [x0] ; CHECK-NEXT: ret ; ; NONEON-NOSVE-LABEL: umin_v8i32: @@ -3189,9 +3174,8 @@ define void @umin_v4i64(ptr %a, ptr %b) { ; CHECK-NEXT: ptrue p0.d, vl2 ; CHECK-NEXT: ldp q1, q2, [x0] ; CHECK-NEXT: umin z0.d, p0/m, z0.d, z1.d -; CHECK-NEXT: movprfx z1, z2 -; CHECK-NEXT: umin z1.d, p0/m, z1.d, z3.d -; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: umin z2.d, p0/m, z2.d, z3.d +; CHECK-NEXT: stp q0, q2, [x0] ; CHECK-NEXT: ret ; ; NONEON-NOSVE-LABEL: umin_v4i64: diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-mulh.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-mulh.ll index 0c97eedd4362d..85b7b4d010062 100644 --- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-mulh.ll +++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-mulh.ll @@ -294,9 +294,8 @@ define void @smulh_v32i8(ptr %a, ptr %b) { ; SVE-NEXT: ptrue p0.b, vl16 ; SVE-NEXT: ldp q1, q2, [x0] ; SVE-NEXT: smulh z0.b, p0/m, z0.b, z1.b -; SVE-NEXT: movprfx z1, z2 -; SVE-NEXT: smulh z1.b, p0/m, z1.b, z3.b -; SVE-NEXT: stp q0, q1, [x0] +; SVE-NEXT: smulh z2.b, p0/m, z2.b, z3.b +; SVE-NEXT: stp q0, q2, [x0] ; SVE-NEXT: ret ; ; SVE2-LABEL: smulh_v32i8: @@ -755,9 +754,8 @@ define void @smulh_v16i16(ptr %a, ptr %b) { ; SVE-NEXT: ptrue p0.h, vl8 ; SVE-NEXT: ldp q1, q2, [x0] ; SVE-NEXT: smulh z0.h, p0/m, z0.h, z1.h -; SVE-NEXT: movprfx z1, z2 -; SVE-NEXT: smulh z1.h, p0/m, z1.h, z3.h -; SVE-NEXT: stp q0, q1, [x0] +; SVE-NEXT: smulh z2.h, p0/m, z2.h, z3.h +; SVE-NEXT: stp q0, q2, [x0] ; SVE-NEXT: ret ; ; SVE2-LABEL: smulh_v16i16: @@ -1001,9 +999,8 @@ define void @smulh_v8i32(ptr %a, ptr %b) { ; SVE-NEXT: ptrue p0.s, vl4 ; SVE-NEXT: ldp q1, q2, [x0] ; SVE-NEXT: smulh z0.s, p0/m, z0.s, z1.s -; SVE-NEXT: movprfx z1, z2 -; 
SVE-NEXT: smulh z1.s, p0/m, z1.s, z3.s -; SVE-NEXT: stp q0, q1, [x0] +; SVE-NEXT: smulh z2.s, p0/m, z2.s, z3.s +; SVE-NEXT: stp q0, q2, [x0] ; SVE-NEXT: ret ; ; SVE2-LABEL: smulh_v8i32: @@ -1159,9 +1156,8 @@ define void @smulh_v4i64(ptr %a, ptr %b) { ; SVE-NEXT: ptrue p0.d, vl2 ; SVE-NEXT: ldp q1, q2, [x0] ; SVE-NEXT: smulh z0.d, p0/m, z0.d, z1.d -; SVE-NEXT: movprfx z1, z2 -; SVE-NEXT: smulh z1.d, p0/m, z1.d, z3.d -; SVE-NEXT: stp q0, q1, [x0] +; SVE-NEXT: smulh z2.d, p0/m, z2.d, z3.d +; SVE-NEXT: stp q0, q2, [x0] ; SVE-NEXT: ret ; ; SVE2-LABEL: smulh_v4i64: @@ -1494,9 +1490,8 @@ define void @umulh_v32i8(ptr %a, ptr %b) { ; SVE-NEXT: ptrue p0.b, vl16 ; SVE-NEXT: ldp q1, q2, [x0] ; SVE-NEXT: umulh z0.b, p0/m, z0.b, z1.b -; SVE-NEXT: movprfx z1, z2 -; SVE-NEXT: umulh z1.b, p0/m, z1.b, z3.b -; SVE-NEXT: stp q0, q1, [x0] +; SVE-NEXT: umulh z2.b, p0/m, z2.b, z3.b +; SVE-NEXT: stp q0, q2, [x0] ; SVE-NEXT: ret ; ; SVE2-LABEL: umulh_v32i8: @@ -1954,9 +1949,8 @@ define void @umulh_v16i16(ptr %a, ptr %b) { ; SVE-NEXT: ptrue p0.h, vl8 ; SVE-NEXT: ldp q1, q2, [x0] ; SVE-NEXT: umulh z0.h, p0/m, z0.h, z1.h -; SVE-NEXT: movprfx z1, z2 -; SVE-NEXT: umulh z1.h, p0/m, z1.h, z3.h -; SVE-NEXT: stp q0, q1, [x0] +; SVE-NEXT: umulh z2.h, p0/m, z2.h, z3.h +; SVE-NEXT: stp q0, q2, [x0] ; SVE-NEXT: ret ; ; SVE2-LABEL: umulh_v16i16: @@ -2200,9 +2194,8 @@ define void @umulh_v8i32(ptr %a, ptr %b) { ; SVE-NEXT: ptrue p0.s, vl4 ; SVE-NEXT: ldp q1, q2, [x0] ; SVE-NEXT: umulh z0.s, p0/m, z0.s, z1.s -; SVE-NEXT: movprfx z1, z2 -; SVE-NEXT: umulh z1.s, p0/m, z1.s, z3.s -; SVE-NEXT: stp q0, q1, [x0] +; SVE-NEXT: umulh z2.s, p0/m, z2.s, z3.s +; SVE-NEXT: stp q0, q2, [x0] ; SVE-NEXT: ret ; ; SVE2-LABEL: umulh_v8i32: @@ -2358,9 +2351,8 @@ define void @umulh_v4i64(ptr %a, ptr %b) { ; SVE-NEXT: ptrue p0.d, vl2 ; SVE-NEXT: ldp q1, q2, [x0] ; SVE-NEXT: umulh z0.d, p0/m, z0.d, z1.d -; SVE-NEXT: movprfx z1, z2 -; SVE-NEXT: umulh z1.d, p0/m, z1.d, z3.d -; SVE-NEXT: stp q0, q1, [x0] +; SVE-NEXT: umulh z2.d, p0/m, z2.d, z3.d +; SVE-NEXT: stp q0, q2, [x0] ; SVE-NEXT: ret ; ; SVE2-LABEL: umulh_v4i64: diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-rem.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-rem.ll index 372f6a06bf64b..c4b6c0e6e924c 100644 --- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-rem.ll +++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-rem.ll @@ -883,9 +883,8 @@ define void @srem_v8i32(ptr %a, ptr %b) { ; CHECK-NEXT: movprfx z5, z2 ; CHECK-NEXT: sdiv z5.s, p0/m, z5.s, z3.s ; CHECK-NEXT: msb z0.s, p0/m, z4.s, z1.s -; CHECK-NEXT: movprfx z1, z2 -; CHECK-NEXT: mls z1.s, p0/m, z5.s, z3.s -; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: mls z2.s, p0/m, z5.s, z3.s +; CHECK-NEXT: stp q0, q2, [x0] ; CHECK-NEXT: ret ; ; NONEON-NOSVE-LABEL: srem_v8i32: @@ -1013,9 +1012,8 @@ define void @srem_v4i64(ptr %a, ptr %b) { ; CHECK-NEXT: movprfx z5, z2 ; CHECK-NEXT: sdiv z5.d, p0/m, z5.d, z3.d ; CHECK-NEXT: msb z0.d, p0/m, z4.d, z1.d -; CHECK-NEXT: movprfx z1, z2 -; CHECK-NEXT: mls z1.d, p0/m, z5.d, z3.d -; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: mls z2.d, p0/m, z5.d, z3.d +; CHECK-NEXT: stp q0, q2, [x0] ; CHECK-NEXT: ret ; ; NONEON-NOSVE-LABEL: srem_v4i64: @@ -1933,9 +1931,8 @@ define void @urem_v8i32(ptr %a, ptr %b) { ; CHECK-NEXT: movprfx z5, z2 ; CHECK-NEXT: udiv z5.s, p0/m, z5.s, z3.s ; CHECK-NEXT: msb z0.s, p0/m, z4.s, z1.s -; CHECK-NEXT: movprfx z1, z2 -; CHECK-NEXT: mls z1.s, p0/m, z5.s, z3.s -; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: mls z2.s, 
p0/m, z5.s, z3.s +; CHECK-NEXT: stp q0, q2, [x0] ; CHECK-NEXT: ret ; ; NONEON-NOSVE-LABEL: urem_v8i32: @@ -2063,9 +2060,8 @@ define void @urem_v4i64(ptr %a, ptr %b) { ; CHECK-NEXT: movprfx z5, z2 ; CHECK-NEXT: udiv z5.d, p0/m, z5.d, z3.d ; CHECK-NEXT: msb z0.d, p0/m, z4.d, z1.d -; CHECK-NEXT: movprfx z1, z2 -; CHECK-NEXT: mls z1.d, p0/m, z5.d, z3.d -; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: mls z2.d, p0/m, z5.d, z3.d +; CHECK-NEXT: stp q0, q2, [x0] ; CHECK-NEXT: ret ; ; NONEON-NOSVE-LABEL: urem_v4i64: diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-shifts.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-shifts.ll index d0f99211e80fc..4cf8945575ded 100644 --- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-shifts.ll +++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-shifts.ll @@ -195,9 +195,8 @@ define void @ashr_v32i8(ptr %a, ptr %b) { ; CHECK-NEXT: ptrue p0.b, vl16 ; CHECK-NEXT: ldp q1, q2, [x0] ; CHECK-NEXT: asrr z0.b, p0/m, z0.b, z1.b -; CHECK-NEXT: movprfx z1, z2 -; CHECK-NEXT: asr z1.b, p0/m, z1.b, z3.b -; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: asr z2.b, p0/m, z2.b, z3.b +; CHECK-NEXT: stp q0, q2, [x0] ; CHECK-NEXT: ret ; ; NONEON-NOSVE-LABEL: ashr_v32i8: @@ -476,9 +475,8 @@ define void @ashr_v16i16(ptr %a, ptr %b) { ; CHECK-NEXT: ptrue p0.h, vl8 ; CHECK-NEXT: ldp q1, q2, [x0] ; CHECK-NEXT: asrr z0.h, p0/m, z0.h, z1.h -; CHECK-NEXT: movprfx z1, z2 -; CHECK-NEXT: asr z1.h, p0/m, z1.h, z3.h -; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: asr z2.h, p0/m, z2.h, z3.h +; CHECK-NEXT: stp q0, q2, [x0] ; CHECK-NEXT: ret ; ; NONEON-NOSVE-LABEL: ashr_v16i16: @@ -632,9 +630,8 @@ define void @ashr_v8i32(ptr %a, ptr %b) { ; CHECK-NEXT: ptrue p0.s, vl4 ; CHECK-NEXT: ldp q1, q2, [x0] ; CHECK-NEXT: asrr z0.s, p0/m, z0.s, z1.s -; CHECK-NEXT: movprfx z1, z2 -; CHECK-NEXT: asr z1.s, p0/m, z1.s, z3.s -; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: asr z2.s, p0/m, z2.s, z3.s +; CHECK-NEXT: stp q0, q2, [x0] ; CHECK-NEXT: ret ; ; NONEON-NOSVE-LABEL: ashr_v8i32: @@ -739,9 +736,8 @@ define void @ashr_v4i64(ptr %a, ptr %b) { ; CHECK-NEXT: ptrue p0.d, vl2 ; CHECK-NEXT: ldp q1, q2, [x0] ; CHECK-NEXT: asrr z0.d, p0/m, z0.d, z1.d -; CHECK-NEXT: movprfx z1, z2 -; CHECK-NEXT: asr z1.d, p0/m, z1.d, z3.d -; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: asr z2.d, p0/m, z2.d, z3.d +; CHECK-NEXT: stp q0, q2, [x0] ; CHECK-NEXT: ret ; ; NONEON-NOSVE-LABEL: ashr_v4i64: @@ -965,9 +961,8 @@ define void @lshr_v32i8(ptr %a, ptr %b) { ; CHECK-NEXT: ptrue p0.b, vl16 ; CHECK-NEXT: ldp q1, q2, [x0] ; CHECK-NEXT: lsrr z0.b, p0/m, z0.b, z1.b -; CHECK-NEXT: movprfx z1, z2 -; CHECK-NEXT: lsr z1.b, p0/m, z1.b, z3.b -; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: lsr z2.b, p0/m, z2.b, z3.b +; CHECK-NEXT: stp q0, q2, [x0] ; CHECK-NEXT: ret ; ; NONEON-NOSVE-LABEL: lshr_v32i8: @@ -1246,9 +1241,8 @@ define void @lshr_v16i16(ptr %a, ptr %b) { ; CHECK-NEXT: ptrue p0.h, vl8 ; CHECK-NEXT: ldp q1, q2, [x0] ; CHECK-NEXT: lsrr z0.h, p0/m, z0.h, z1.h -; CHECK-NEXT: movprfx z1, z2 -; CHECK-NEXT: lsr z1.h, p0/m, z1.h, z3.h -; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: lsr z2.h, p0/m, z2.h, z3.h +; CHECK-NEXT: stp q0, q2, [x0] ; CHECK-NEXT: ret ; ; NONEON-NOSVE-LABEL: lshr_v16i16: @@ -1402,9 +1396,8 @@ define void @lshr_v8i32(ptr %a, ptr %b) { ; CHECK-NEXT: ptrue p0.s, vl4 ; CHECK-NEXT: ldp q1, q2, [x0] ; CHECK-NEXT: lsrr z0.s, p0/m, z0.s, z1.s -; CHECK-NEXT: movprfx z1, z2 -; CHECK-NEXT: lsr z1.s, p0/m, z1.s, z3.s -; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: lsr z2.s, 
p0/m, z2.s, z3.s +; CHECK-NEXT: stp q0, q2, [x0] ; CHECK-NEXT: ret ; ; NONEON-NOSVE-LABEL: lshr_v8i32: @@ -1509,9 +1502,8 @@ define void @lshr_v4i64(ptr %a, ptr %b) { ; CHECK-NEXT: ptrue p0.d, vl2 ; CHECK-NEXT: ldp q1, q2, [x0] ; CHECK-NEXT: lsrr z0.d, p0/m, z0.d, z1.d -; CHECK-NEXT: movprfx z1, z2 -; CHECK-NEXT: lsr z1.d, p0/m, z1.d, z3.d -; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: lsr z2.d, p0/m, z2.d, z3.d +; CHECK-NEXT: stp q0, q2, [x0] ; CHECK-NEXT: ret ; ; NONEON-NOSVE-LABEL: lshr_v4i64: @@ -1764,9 +1756,8 @@ define void @shl_v32i8(ptr %a, ptr %b) { ; CHECK-NEXT: ptrue p0.b, vl16 ; CHECK-NEXT: ldp q1, q2, [x0] ; CHECK-NEXT: lslr z0.b, p0/m, z0.b, z1.b -; CHECK-NEXT: movprfx z1, z2 -; CHECK-NEXT: lsl z1.b, p0/m, z1.b, z3.b -; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: lsl z2.b, p0/m, z2.b, z3.b +; CHECK-NEXT: stp q0, q2, [x0] ; CHECK-NEXT: ret ; ; NONEON-NOSVE-LABEL: shl_v32i8: @@ -2014,9 +2005,8 @@ define void @shl_v16i16(ptr %a, ptr %b) { ; CHECK-NEXT: ptrue p0.h, vl8 ; CHECK-NEXT: ldp q1, q2, [x0] ; CHECK-NEXT: lslr z0.h, p0/m, z0.h, z1.h -; CHECK-NEXT: movprfx z1, z2 -; CHECK-NEXT: lsl z1.h, p0/m, z1.h, z3.h -; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: lsl z2.h, p0/m, z2.h, z3.h +; CHECK-NEXT: stp q0, q2, [x0] ; CHECK-NEXT: ret ; ; NONEON-NOSVE-LABEL: shl_v16i16: @@ -2170,9 +2160,8 @@ define void @shl_v8i32(ptr %a, ptr %b) { ; CHECK-NEXT: ptrue p0.s, vl4 ; CHECK-NEXT: ldp q1, q2, [x0] ; CHECK-NEXT: lslr z0.s, p0/m, z0.s, z1.s -; CHECK-NEXT: movprfx z1, z2 -; CHECK-NEXT: lsl z1.s, p0/m, z1.s, z3.s -; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: lsl z2.s, p0/m, z2.s, z3.s +; CHECK-NEXT: stp q0, q2, [x0] ; CHECK-NEXT: ret ; ; NONEON-NOSVE-LABEL: shl_v8i32: @@ -2277,9 +2266,8 @@ define void @shl_v4i64(ptr %a, ptr %b) { ; CHECK-NEXT: ptrue p0.d, vl2 ; CHECK-NEXT: ldp q1, q2, [x0] ; CHECK-NEXT: lslr z0.d, p0/m, z0.d, z1.d -; CHECK-NEXT: movprfx z1, z2 -; CHECK-NEXT: lsl z1.d, p0/m, z1.d, z3.d -; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: lsl z2.d, p0/m, z2.d, z3.d +; CHECK-NEXT: stp q0, q2, [x0] ; CHECK-NEXT: ret ; ; NONEON-NOSVE-LABEL: shl_v4i64: diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-optimize-ptrue.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-optimize-ptrue.ll index 74e5fe7352cfd..e9b2f539b30cc 100644 --- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-optimize-ptrue.ll +++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-optimize-ptrue.ll @@ -954,9 +954,8 @@ define void @fadd_v16f16(ptr %a, ptr %b) { ; CHECK-NEXT: ptrue p0.h, vl8 ; CHECK-NEXT: ldp q1, q2, [x0] ; CHECK-NEXT: fadd z0.h, p0/m, z0.h, z1.h -; CHECK-NEXT: movprfx z1, z2 -; CHECK-NEXT: fadd z1.h, p0/m, z1.h, z3.h -; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: fadd z2.h, p0/m, z2.h, z3.h +; CHECK-NEXT: stp q0, q2, [x0] ; CHECK-NEXT: ret ; ; NONEON-NOSVE-LABEL: fadd_v16f16: @@ -1170,9 +1169,8 @@ define void @fadd_v8f32(ptr %a, ptr %b) { ; CHECK-NEXT: ptrue p0.s, vl4 ; CHECK-NEXT: ldp q1, q2, [x0] ; CHECK-NEXT: fadd z0.s, p0/m, z0.s, z1.s -; CHECK-NEXT: movprfx z1, z2 -; CHECK-NEXT: fadd z1.s, p0/m, z1.s, z3.s -; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: fadd z2.s, p0/m, z2.s, z3.s +; CHECK-NEXT: stp q0, q2, [x0] ; CHECK-NEXT: ret ; ; NONEON-NOSVE-LABEL: fadd_v8f32: @@ -1258,9 +1256,8 @@ define void @fadd_v4f64(ptr %a, ptr %b) { ; CHECK-NEXT: ptrue p0.d, vl2 ; CHECK-NEXT: ldp q1, q2, [x0] ; CHECK-NEXT: fadd z0.d, p0/m, z0.d, z1.d -; CHECK-NEXT: movprfx z1, z2 -; CHECK-NEXT: fadd z1.d, p0/m, z1.d, z3.d -; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: 
fadd z2.d, p0/m, z2.d, z3.d +; CHECK-NEXT: stp q0, q2, [x0] ; CHECK-NEXT: ret ; ; NONEON-NOSVE-LABEL: fadd_v4f64: diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-permute-zip-uzp-trn.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-permute-zip-uzp-trn.ll index e0e88c47fb55c..e78671aaddf18 100644 --- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-permute-zip-uzp-trn.ll +++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-permute-zip-uzp-trn.ll @@ -526,10 +526,9 @@ define void @zip_v4f64(ptr %a, ptr %b) { ; CHECK-NEXT: zip1 z5.d, z0.d, z2.d ; CHECK-NEXT: trn2 z1.d, z1.d, z3.d ; CHECK-NEXT: trn2 z0.d, z0.d, z2.d -; CHECK-NEXT: movprfx z2, z4 -; CHECK-NEXT: fadd z2.d, p0/m, z2.d, z5.d +; CHECK-NEXT: fadd z4.d, p0/m, z4.d, z5.d ; CHECK-NEXT: fadd z0.d, p0/m, z0.d, z1.d -; CHECK-NEXT: stp q2, q0, [x0] +; CHECK-NEXT: stp q4, q0, [x0] ; CHECK-NEXT: ret ; ; NONEON-NOSVE-LABEL: zip_v4f64: @@ -2159,10 +2158,9 @@ define void @zip_vscale2_4(ptr %a, ptr %b) { ; CHECK-NEXT: zip1 z5.d, z0.d, z2.d ; CHECK-NEXT: trn2 z1.d, z1.d, z3.d ; CHECK-NEXT: trn2 z0.d, z0.d, z2.d -; CHECK-NEXT: movprfx z2, z4 -; CHECK-NEXT: fadd z2.d, p0/m, z2.d, z5.d +; CHECK-NEXT: fadd z4.d, p0/m, z4.d, z5.d ; CHECK-NEXT: fadd z0.d, p0/m, z0.d, z1.d -; CHECK-NEXT: stp q2, q0, [x0] +; CHECK-NEXT: stp q4, q0, [x0] ; CHECK-NEXT: ret ; ; NONEON-NOSVE-LABEL: zip_vscale2_4: diff --git a/llvm/test/CodeGen/AArch64/sve-vecreduce-dot.ll b/llvm/test/CodeGen/AArch64/sve-vecreduce-dot.ll index 6af26067cd6d6..0472d5c1935f5 100644 --- a/llvm/test/CodeGen/AArch64/sve-vecreduce-dot.ll +++ b/llvm/test/CodeGen/AArch64/sve-vecreduce-dot.ll @@ -36,10 +36,9 @@ define i32 @test( %bin.rdx, %bin.rdx2) { ; CHECK-NEXT: mla z0.s, p0/m, z25.s, z24.s ; CHECK-NEXT: mad z2.s, p0/m, z6.s, z4.s ; CHECK-NEXT: mad z1.s, p0/m, z3.s, z26.s -; CHECK-NEXT: movprfx z3, z5 -; CHECK-NEXT: mla z3.s, p0/m, z28.s, z7.s +; CHECK-NEXT: mla z5.s, p0/m, z28.s, z7.s ; CHECK-NEXT: add z0.s, z2.s, z0.s -; CHECK-NEXT: add z1.s, z3.s, z1.s +; CHECK-NEXT: add z1.s, z5.s, z1.s ; CHECK-NEXT: add z0.s, z1.s, z0.s ; CHECK-NEXT: uaddv d0, p0, z0.s ; CHECK-NEXT: fmov w0, s0 diff --git a/llvm/test/CodeGen/AArch64/sve2-xar.ll b/llvm/test/CodeGen/AArch64/sve2-xar.ll index 888e94d42f449..8f6f4510d8388 100644 --- a/llvm/test/CodeGen/AArch64/sve2-xar.ll +++ b/llvm/test/CodeGen/AArch64/sve2-xar.ll @@ -157,10 +157,9 @@ define @xar_nxv2i64_l_neg1( %x, %x, %y %b = call @llvm.fshl.nxv2i64( %a, %a, %z) diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-divergent-i1-used-outside-loop.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-divergent-i1-used-outside-loop.ll index dd01112d97a18..c1e6b4fffa82d 100644 --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-divergent-i1-used-outside-loop.ll +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-divergent-i1-used-outside-loop.ll @@ -21,14 +21,14 @@ define void @divergent_i1_phi_used_outside_loop(float %val, float %pre.cond.val, ; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX10-NEXT: v_cvt_f32_u32_e32 v1, s6 ; GFX10-NEXT: s_mov_b32 s8, exec_lo +; GFX10-NEXT: s_mov_b32 s9, s5 ; GFX10-NEXT: s_add_i32 s6, s6, 1 -; GFX10-NEXT: s_xor_b32 s8, s5, s8 +; GFX10-NEXT: s_xor_b32 s5, s5, s8 ; GFX10-NEXT: v_cmp_gt_f32_e32 vcc_lo, v1, v0 ; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4 ; GFX10-NEXT: s_andn2_b32 s7, s7, exec_lo -; GFX10-NEXT: s_and_b32 s9, exec_lo, s5 -; GFX10-NEXT: s_mov_b32 s5, s8 -; GFX10-NEXT: s_or_b32 s7, s7, s9 +; GFX10-NEXT: s_and_b32 s8, exec_lo, s9 +; GFX10-NEXT: s_or_b32 s7, s7, 
s8 ; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4 ; GFX10-NEXT: s_cbranch_execnz .LBB0_1 ; GFX10-NEXT: ; %bb.2: ; %exit @@ -240,11 +240,11 @@ define void @divergent_i1_xor_used_outside_loop_larger_loop_body(i32 %num.elts, ; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0 ; GFX10-NEXT: s_mov_b32 s6, exec_lo -; GFX10-NEXT: s_mov_b32 s8, 0 +; GFX10-NEXT: s_mov_b32 s4, 0 ; GFX10-NEXT: s_and_saveexec_b32 s7, vcc_lo ; GFX10-NEXT: s_cbranch_execz .LBB4_6 ; GFX10-NEXT: ; %bb.1: ; %loop.start.preheader -; GFX10-NEXT: s_mov_b32 s4, 0 +; GFX10-NEXT: s_mov_b32 s8, 0 ; GFX10-NEXT: ; implicit-def: $sgpr10 ; GFX10-NEXT: ; implicit-def: $sgpr11 ; GFX10-NEXT: ; implicit-def: $sgpr9 @@ -345,8 +345,8 @@ define void @divergent_i1_icmp_used_outside_loop(i32 %v0, i32 %v1, ptr addrspace ; GFX10-LABEL: divergent_i1_icmp_used_outside_loop: ; GFX10: ; %bb.0: ; %entry ; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX10-NEXT: s_mov_b32 s6, 0 ; GFX10-NEXT: s_mov_b32 s4, 0 +; GFX10-NEXT: s_mov_b32 s6, 0 ; GFX10-NEXT: ; implicit-def: $sgpr7 ; GFX10-NEXT: s_branch .LBB5_2 ; GFX10-NEXT: .LBB5_1: ; %Flow @@ -457,8 +457,8 @@ define amdgpu_ps void @divergent_i1_freeze_used_outside_loop(i32 %n, ptr addrspa ; GFX10-LABEL: divergent_i1_freeze_used_outside_loop: ; GFX10: ; %bb.0: ; %entry ; GFX10-NEXT: s_mov_b32 s1, exec_lo -; GFX10-NEXT: s_mov_b32 s2, 0 ; GFX10-NEXT: s_mov_b32 s0, 0 +; GFX10-NEXT: s_mov_b32 s2, 0 ; GFX10-NEXT: ; implicit-def: $sgpr4 ; GFX10-NEXT: ; implicit-def: $sgpr3 ; GFX10-NEXT: s_branch .LBB6_2 @@ -534,8 +534,8 @@ exit: define amdgpu_cs void @loop_with_1break(ptr addrspace(1) %x, ptr addrspace(1) %a, ptr addrspace(1) %a.break) { ; GFX10-LABEL: loop_with_1break: ; GFX10: ; %bb.0: ; %entry -; GFX10-NEXT: s_mov_b32 s4, 0 ; GFX10-NEXT: s_mov_b32 s0, 0 +; GFX10-NEXT: s_mov_b32 s4, 0 ; GFX10-NEXT: ; implicit-def: $sgpr6 ; GFX10-NEXT: ; implicit-def: $sgpr7 ; GFX10-NEXT: ; implicit-def: $sgpr5 diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-structurizer.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-structurizer.ll index fd08ab88990ed..484536bd27f4e 100644 --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-structurizer.ll +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-structurizer.ll @@ -106,8 +106,8 @@ exit: define amdgpu_cs void @loop_with_1break(ptr addrspace(1) %x, ptr addrspace(1) %a) { ; GFX10-LABEL: loop_with_1break: ; GFX10: ; %bb.0: ; %entry -; GFX10-NEXT: s_mov_b32 s4, 0 ; GFX10-NEXT: s_mov_b32 s0, 0 +; GFX10-NEXT: s_mov_b32 s4, 0 ; GFX10-NEXT: ; implicit-def: $sgpr5 ; GFX10-NEXT: s_branch .LBB2_2 ; GFX10-NEXT: .LBB2_1: ; %Flow @@ -180,8 +180,8 @@ exit: define amdgpu_cs void @loop_with_2breaks(ptr addrspace(1) %x, ptr addrspace(1) %a, ptr addrspace(1) %b) { ; GFX10-LABEL: loop_with_2breaks: ; GFX10: ; %bb.0: ; %entry -; GFX10-NEXT: s_mov_b32 s4, 0 ; GFX10-NEXT: s_mov_b32 s0, 0 +; GFX10-NEXT: s_mov_b32 s4, 0 ; GFX10-NEXT: ; implicit-def: $sgpr5 ; GFX10-NEXT: s_branch .LBB3_3 ; GFX10-NEXT: .LBB3_1: ; %Flow3 @@ -278,8 +278,8 @@ exit: define amdgpu_cs void @loop_with_3breaks(ptr addrspace(1) %x, ptr addrspace(1) %a, ptr addrspace(1) %b, ptr addrspace(1) %c) { ; GFX10-LABEL: loop_with_3breaks: ; GFX10: ; %bb.0: ; %entry -; GFX10-NEXT: s_mov_b32 s4, 0 ; GFX10-NEXT: s_mov_b32 s0, 0 +; GFX10-NEXT: s_mov_b32 s4, 0 ; GFX10-NEXT: ; implicit-def: $sgpr5 ; GFX10-NEXT: s_branch .LBB4_4 ; GFX10-NEXT: .LBB4_1: ; %Flow5 @@ -404,8 +404,8 @@ exit: define amdgpu_cs void @loop_with_div_break_with_body(ptr addrspace(1) %x, ptr addrspace(1) 
%a, ptr addrspace(1) %a.break) { ; GFX10-LABEL: loop_with_div_break_with_body: ; GFX10: ; %bb.0: ; %entry -; GFX10-NEXT: s_mov_b32 s4, 0 ; GFX10-NEXT: s_mov_b32 s0, 0 +; GFX10-NEXT: s_mov_b32 s4, 0 ; GFX10-NEXT: ; implicit-def: $sgpr6 ; GFX10-NEXT: ; implicit-def: $sgpr7 ; GFX10-NEXT: ; implicit-def: $sgpr5 diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-temporal-divergent-i1.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-temporal-divergent-i1.ll index d13d6a19d332a..69baf613fdfe5 100644 --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-temporal-divergent-i1.ll +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-temporal-divergent-i1.ll @@ -101,8 +101,8 @@ define amdgpu_cs void @loop_with_1break(ptr addrspace(1) %x, i32 %x.size, ptr ad ; GFX10-LABEL: loop_with_1break: ; GFX10: ; %bb.0: ; %entry ; GFX10-NEXT: v_mov_b32_e32 v3, 0 -; GFX10-NEXT: s_mov_b32 s8, 0 ; GFX10-NEXT: s_mov_b32 s4, 0 +; GFX10-NEXT: s_mov_b32 s8, 0 ; GFX10-NEXT: ; implicit-def: $sgpr10 ; GFX10-NEXT: ; implicit-def: $sgpr9 ; GFX10-NEXT: s_branch .LBB2_3 @@ -197,14 +197,14 @@ define void @nested_loops_temporal_divergence_inner(float %pre.cond.val, i32 %n. ; GFX10-LABEL: nested_loops_temporal_divergence_inner: ; GFX10: ; %bb.0: ; %entry ; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX10-NEXT: v_cmp_lt_f32_e64 s8, 1.0, v0 -; GFX10-NEXT: s_mov_b32 s5, 0 +; GFX10-NEXT: v_cmp_lt_f32_e64 s5, 1.0, v0 ; GFX10-NEXT: s_mov_b32 s6, 0 +; GFX10-NEXT: s_mov_b32 s8, 0 ; GFX10-NEXT: .LBB3_1: ; %OuterHeader ; GFX10-NEXT: ; =>This Loop Header: Depth=1 ; GFX10-NEXT: ; Child Loop BB3_2 Depth 2 ; GFX10-NEXT: s_ashr_i32 s7, s6, 31 -; GFX10-NEXT: s_mov_b32 s4, s8 +; GFX10-NEXT: s_mov_b32 s4, s5 ; GFX10-NEXT: s_lshl_b64 s[10:11], s[6:7], 2 ; GFX10-NEXT: ; implicit-def: $sgpr9 ; GFX10-NEXT: v_mov_b32_e32 v6, s10 @@ -239,13 +239,13 @@ define void @nested_loops_temporal_divergence_inner(float %pre.cond.val, i32 %n. ; GFX10-NEXT: s_add_i32 s6, s6, 1 ; GFX10-NEXT: v_add_co_u32 v6, s4, v4, v6 ; GFX10-NEXT: v_add_co_ci_u32_e64 v7, s4, v5, v7, s4 -; GFX10-NEXT: s_or_b32 s5, vcc_lo, s5 +; GFX10-NEXT: s_or_b32 s8, vcc_lo, s8 ; GFX10-NEXT: flat_store_byte v[6:7], v0 ; GFX10-NEXT: s_waitcnt_depctr 0xffe3 -; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s5 +; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s8 ; GFX10-NEXT: s_cbranch_execnz .LBB3_1 ; GFX10-NEXT: ; %bb.4: ; %exit -; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s5 +; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s8 ; GFX10-NEXT: s_waitcnt lgkmcnt(0) ; GFX10-NEXT: s_setpc_b64 s[30:31] entry: @@ -288,14 +288,14 @@ define void @nested_loops_temporal_divergence_outer(float %pre.cond.val, i32 %n. ; GFX10-LABEL: nested_loops_temporal_divergence_outer: ; GFX10: ; %bb.0: ; %entry ; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX10-NEXT: v_cmp_lt_f32_e64 s8, 1.0, v0 -; GFX10-NEXT: s_mov_b32 s5, 0 +; GFX10-NEXT: v_cmp_lt_f32_e64 s5, 1.0, v0 ; GFX10-NEXT: s_mov_b32 s6, 0 +; GFX10-NEXT: s_mov_b32 s8, 0 ; GFX10-NEXT: .LBB4_1: ; %OuterHeader ; GFX10-NEXT: ; =>This Loop Header: Depth=1 ; GFX10-NEXT: ; Child Loop BB4_2 Depth 2 ; GFX10-NEXT: s_ashr_i32 s7, s6, 31 -; GFX10-NEXT: s_mov_b32 s4, s8 +; GFX10-NEXT: s_mov_b32 s4, s5 ; GFX10-NEXT: s_lshl_b64 s[10:11], s[6:7], 2 ; GFX10-NEXT: ; implicit-def: $sgpr9 ; GFX10-NEXT: v_mov_b32_e32 v6, s10 @@ -330,13 +330,13 @@ define void @nested_loops_temporal_divergence_outer(float %pre.cond.val, i32 %n. 
; GFX10-NEXT: s_add_i32 s6, s6, 1 ; GFX10-NEXT: v_add_co_u32 v6, s4, v4, v6 ; GFX10-NEXT: v_add_co_ci_u32_e64 v7, s4, v5, v7, s4 -; GFX10-NEXT: s_or_b32 s5, vcc_lo, s5 +; GFX10-NEXT: s_or_b32 s8, vcc_lo, s8 ; GFX10-NEXT: flat_store_byte v[6:7], v0 ; GFX10-NEXT: s_waitcnt_depctr 0xffe3 -; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s5 +; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s8 ; GFX10-NEXT: s_cbranch_execnz .LBB4_1 ; GFX10-NEXT: ; %bb.4: ; %exit -; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s5 +; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s8 ; GFX10-NEXT: s_waitcnt lgkmcnt(0) ; GFX10-NEXT: s_setpc_b64 s[30:31] entry: @@ -379,15 +379,15 @@ define void @nested_loops_temporal_divergence_both(float %pre.cond.val, i32 %n.i ; GFX10-LABEL: nested_loops_temporal_divergence_both: ; GFX10: ; %bb.0: ; %entry ; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX10-NEXT: v_cmp_lt_f32_e64 s8, 1.0, v0 -; GFX10-NEXT: s_mov_b32 s5, 0 +; GFX10-NEXT: v_cmp_lt_f32_e64 s5, 1.0, v0 ; GFX10-NEXT: s_mov_b32 s6, 0 +; GFX10-NEXT: s_mov_b32 s8, 0 ; GFX10-NEXT: ; implicit-def: $sgpr9 ; GFX10-NEXT: .LBB5_1: ; %OuterHeader ; GFX10-NEXT: ; =>This Loop Header: Depth=1 ; GFX10-NEXT: ; Child Loop BB5_2 Depth 2 ; GFX10-NEXT: s_ashr_i32 s7, s6, 31 -; GFX10-NEXT: s_mov_b32 s4, s8 +; GFX10-NEXT: s_mov_b32 s4, s5 ; GFX10-NEXT: s_lshl_b64 s[10:11], s[6:7], 2 ; GFX10-NEXT: v_mov_b32_e32 v8, s10 ; GFX10-NEXT: v_mov_b32_e32 v9, s11 @@ -421,13 +421,13 @@ define void @nested_loops_temporal_divergence_both(float %pre.cond.val, i32 %n.i ; GFX10-NEXT: s_add_i32 s6, s6, 1 ; GFX10-NEXT: v_add_co_u32 v8, s4, v4, v8 ; GFX10-NEXT: v_add_co_ci_u32_e64 v9, s4, v5, v9, s4 -; GFX10-NEXT: s_or_b32 s5, vcc_lo, s5 +; GFX10-NEXT: s_or_b32 s8, vcc_lo, s8 ; GFX10-NEXT: flat_store_byte v[8:9], v0 ; GFX10-NEXT: s_waitcnt_depctr 0xffe3 -; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s5 +; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s8 ; GFX10-NEXT: s_cbranch_execnz .LBB5_1 ; GFX10-NEXT: ; %bb.4: ; %exit -; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s5 +; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s8 ; GFX10-NEXT: flat_store_byte v[6:7], v0 ; GFX10-NEXT: s_waitcnt lgkmcnt(0) ; GFX10-NEXT: s_setpc_b64 s[30:31] diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-mui.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-mui.ll index 5240bf4f3a1d7..9aaa9635a7da1 100644 --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-mui.ll +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-mui.ll @@ -547,8 +547,8 @@ define amdgpu_cs void @loop_with_2breaks(ptr addrspace(1) %x, ptr addrspace(1) % ; ; NEW_RBS-LABEL: loop_with_2breaks: ; NEW_RBS: ; %bb.0: ; %entry -; NEW_RBS-NEXT: s_mov_b32 s4, 0 ; NEW_RBS-NEXT: s_mov_b32 s0, 0 +; NEW_RBS-NEXT: s_mov_b32 s4, 0 ; NEW_RBS-NEXT: ; implicit-def: $sgpr5 ; NEW_RBS-NEXT: s_branch .LBB16_3 ; NEW_RBS-NEXT: .LBB16_1: ; %Flow3 diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-codegenprepare-idiv.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-codegenprepare-idiv.ll index 54b1554ae5d04..df77e7de43bf6 100644 --- a/llvm/test/CodeGen/AMDGPU/amdgpu-codegenprepare-idiv.ll +++ b/llvm/test/CodeGen/AMDGPU/amdgpu-codegenprepare-idiv.ll @@ -40,34 +40,33 @@ define amdgpu_kernel void @udiv_i32(ptr addrspace(1) %out, i32 %x, i32 %y) { ; GFX6-LABEL: udiv_i32: ; GFX6: ; %bb.0: ; GFX6-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9 -; GFX6-NEXT: s_mov_b32 s7, 0xf000 -; GFX6-NEXT: s_mov_b32 s6, -1 ; GFX6-NEXT: s_waitcnt lgkmcnt(0) -; GFX6-NEXT: v_cvt_f32_u32_e32 v0, s3 -; GFX6-NEXT: s_sub_i32 s4, 0, s3 -; GFX6-NEXT: s_mov_b32 s5, s1 +; GFX6-NEXT: s_mov_b64 s[4:5], 
s[2:3] +; GFX6-NEXT: v_cvt_f32_u32_e32 v0, s5 +; GFX6-NEXT: s_sub_i32 s2, 0, s5 +; GFX6-NEXT: s_mov_b32 s3, 0xf000 ; GFX6-NEXT: v_rcp_iflag_f32_e32 v0, v0 ; GFX6-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0 ; GFX6-NEXT: v_cvt_u32_f32_e32 v0, v0 -; GFX6-NEXT: v_mul_lo_u32 v1, s4, v0 -; GFX6-NEXT: s_mov_b32 s4, s0 +; GFX6-NEXT: v_mul_lo_u32 v1, s2, v0 +; GFX6-NEXT: s_mov_b32 s2, -1 ; GFX6-NEXT: v_mul_hi_u32 v1, v0, v1 ; GFX6-NEXT: v_add_i32_e32 v0, vcc, v0, v1 -; GFX6-NEXT: v_mul_hi_u32 v0, s2, v0 -; GFX6-NEXT: v_readfirstlane_b32 s0, v0 -; GFX6-NEXT: s_mul_i32 s0, s0, s3 -; GFX6-NEXT: s_sub_i32 s0, s2, s0 -; GFX6-NEXT: s_sub_i32 s1, s0, s3 +; GFX6-NEXT: v_mul_hi_u32 v0, s4, v0 +; GFX6-NEXT: v_readfirstlane_b32 s6, v0 +; GFX6-NEXT: s_mul_i32 s6, s6, s5 +; GFX6-NEXT: s_sub_i32 s4, s4, s6 +; GFX6-NEXT: s_sub_i32 s6, s4, s5 ; GFX6-NEXT: v_add_i32_e32 v1, vcc, 1, v0 -; GFX6-NEXT: s_cmp_ge_u32 s0, s3 +; GFX6-NEXT: s_cmp_ge_u32 s4, s5 ; GFX6-NEXT: s_cselect_b64 vcc, -1, 0 ; GFX6-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc -; GFX6-NEXT: s_cselect_b32 s0, s1, s0 +; GFX6-NEXT: s_cselect_b32 s4, s6, s4 ; GFX6-NEXT: v_add_i32_e32 v1, vcc, 1, v0 -; GFX6-NEXT: s_cmp_ge_u32 s0, s3 +; GFX6-NEXT: s_cmp_ge_u32 s4, s5 ; GFX6-NEXT: s_cselect_b64 vcc, -1, 0 ; GFX6-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc -; GFX6-NEXT: buffer_store_dword v0, off, s[4:7], 0 +; GFX6-NEXT: buffer_store_dword v0, off, s[0:3], 0 ; GFX6-NEXT: s_endpgm ; ; GFX9-LABEL: udiv_i32: @@ -138,31 +137,30 @@ define amdgpu_kernel void @urem_i32(ptr addrspace(1) %out, i32 %x, i32 %y) { ; GFX6-LABEL: urem_i32: ; GFX6: ; %bb.0: ; GFX6-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9 -; GFX6-NEXT: s_mov_b32 s7, 0xf000 -; GFX6-NEXT: s_mov_b32 s6, -1 ; GFX6-NEXT: s_waitcnt lgkmcnt(0) -; GFX6-NEXT: v_cvt_f32_u32_e32 v0, s3 -; GFX6-NEXT: s_sub_i32 s4, 0, s3 -; GFX6-NEXT: s_mov_b32 s5, s1 +; GFX6-NEXT: s_mov_b64 s[4:5], s[2:3] +; GFX6-NEXT: v_cvt_f32_u32_e32 v0, s5 +; GFX6-NEXT: s_sub_i32 s2, 0, s5 +; GFX6-NEXT: s_mov_b32 s3, 0xf000 ; GFX6-NEXT: v_rcp_iflag_f32_e32 v0, v0 ; GFX6-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0 ; GFX6-NEXT: v_cvt_u32_f32_e32 v0, v0 -; GFX6-NEXT: v_mul_lo_u32 v1, s4, v0 -; GFX6-NEXT: s_mov_b32 s4, s0 +; GFX6-NEXT: v_mul_lo_u32 v1, s2, v0 +; GFX6-NEXT: s_mov_b32 s2, -1 ; GFX6-NEXT: v_mul_hi_u32 v1, v0, v1 ; GFX6-NEXT: v_add_i32_e32 v0, vcc, v0, v1 -; GFX6-NEXT: v_mul_hi_u32 v0, s2, v0 -; GFX6-NEXT: v_readfirstlane_b32 s0, v0 -; GFX6-NEXT: s_mul_i32 s0, s0, s3 -; GFX6-NEXT: s_sub_i32 s0, s2, s0 -; GFX6-NEXT: s_sub_i32 s1, s0, s3 -; GFX6-NEXT: s_cmp_ge_u32 s0, s3 -; GFX6-NEXT: s_cselect_b32 s0, s1, s0 -; GFX6-NEXT: s_sub_i32 s1, s0, s3 -; GFX6-NEXT: s_cmp_ge_u32 s0, s3 -; GFX6-NEXT: s_cselect_b32 s0, s1, s0 -; GFX6-NEXT: v_mov_b32_e32 v0, s0 -; GFX6-NEXT: buffer_store_dword v0, off, s[4:7], 0 +; GFX6-NEXT: v_mul_hi_u32 v0, s4, v0 +; GFX6-NEXT: v_readfirstlane_b32 s6, v0 +; GFX6-NEXT: s_mul_i32 s6, s6, s5 +; GFX6-NEXT: s_sub_i32 s4, s4, s6 +; GFX6-NEXT: s_sub_i32 s6, s4, s5 +; GFX6-NEXT: s_cmp_ge_u32 s4, s5 +; GFX6-NEXT: s_cselect_b32 s4, s6, s4 +; GFX6-NEXT: s_sub_i32 s6, s4, s5 +; GFX6-NEXT: s_cmp_ge_u32 s4, s5 +; GFX6-NEXT: s_cselect_b32 s4, s6, s4 +; GFX6-NEXT: v_mov_b32_e32 v0, s4 +; GFX6-NEXT: buffer_store_dword v0, off, s[0:3], 0 ; GFX6-NEXT: s_endpgm ; ; GFX9-LABEL: urem_i32: @@ -242,40 +240,39 @@ define amdgpu_kernel void @sdiv_i32(ptr addrspace(1) %out, i32 %x, i32 %y) { ; GFX6-LABEL: sdiv_i32: ; GFX6: ; %bb.0: ; GFX6-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9 -; GFX6-NEXT: s_mov_b32 s7, 0xf000 -; GFX6-NEXT: s_mov_b32 s6, -1 ; GFX6-NEXT: s_waitcnt 
lgkmcnt(0) -; GFX6-NEXT: s_abs_i32 s8, s3 -; GFX6-NEXT: v_cvt_f32_u32_e32 v0, s8 -; GFX6-NEXT: s_sub_i32 s4, 0, s8 -; GFX6-NEXT: s_mov_b32 s5, s1 -; GFX6-NEXT: s_xor_b32 s1, s2, s3 +; GFX6-NEXT: s_mov_b64 s[4:5], s[2:3] +; GFX6-NEXT: s_abs_i32 s6, s5 +; GFX6-NEXT: v_cvt_f32_u32_e32 v0, s6 +; GFX6-NEXT: s_sub_i32 s2, 0, s6 +; GFX6-NEXT: s_abs_i32 s7, s4 +; GFX6-NEXT: s_xor_b32 s4, s4, s5 ; GFX6-NEXT: v_rcp_iflag_f32_e32 v0, v0 -; GFX6-NEXT: s_ashr_i32 s1, s1, 31 +; GFX6-NEXT: s_ashr_i32 s4, s4, 31 +; GFX6-NEXT: s_mov_b32 s3, 0xf000 ; GFX6-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0 ; GFX6-NEXT: v_cvt_u32_f32_e32 v0, v0 -; GFX6-NEXT: v_mul_lo_u32 v1, s4, v0 -; GFX6-NEXT: s_mov_b32 s4, s0 -; GFX6-NEXT: s_abs_i32 s0, s2 +; GFX6-NEXT: v_mul_lo_u32 v1, s2, v0 +; GFX6-NEXT: s_mov_b32 s2, -1 ; GFX6-NEXT: v_mul_hi_u32 v1, v0, v1 ; GFX6-NEXT: v_add_i32_e32 v0, vcc, v0, v1 -; GFX6-NEXT: v_mul_hi_u32 v0, s0, v0 -; GFX6-NEXT: v_readfirstlane_b32 s2, v0 -; GFX6-NEXT: s_mul_i32 s2, s2, s8 -; GFX6-NEXT: s_sub_i32 s0, s0, s2 -; GFX6-NEXT: s_sub_i32 s2, s0, s8 +; GFX6-NEXT: v_mul_hi_u32 v0, s7, v0 +; GFX6-NEXT: v_readfirstlane_b32 s5, v0 +; GFX6-NEXT: s_mul_i32 s5, s5, s6 +; GFX6-NEXT: s_sub_i32 s5, s7, s5 +; GFX6-NEXT: s_sub_i32 s7, s5, s6 ; GFX6-NEXT: v_add_i32_e32 v1, vcc, 1, v0 -; GFX6-NEXT: s_cmp_ge_u32 s0, s8 +; GFX6-NEXT: s_cmp_ge_u32 s5, s6 ; GFX6-NEXT: s_cselect_b64 vcc, -1, 0 ; GFX6-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc -; GFX6-NEXT: s_cselect_b32 s0, s2, s0 +; GFX6-NEXT: s_cselect_b32 s5, s7, s5 ; GFX6-NEXT: v_add_i32_e32 v1, vcc, 1, v0 -; GFX6-NEXT: s_cmp_ge_u32 s0, s8 +; GFX6-NEXT: s_cmp_ge_u32 s5, s6 ; GFX6-NEXT: s_cselect_b64 vcc, -1, 0 ; GFX6-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc -; GFX6-NEXT: v_xor_b32_e32 v0, s1, v0 -; GFX6-NEXT: v_subrev_i32_e32 v0, vcc, s1, v0 -; GFX6-NEXT: buffer_store_dword v0, off, s[4:7], 0 +; GFX6-NEXT: v_xor_b32_e32 v0, s4, v0 +; GFX6-NEXT: v_subrev_i32_e32 v0, vcc, s4, v0 +; GFX6-NEXT: buffer_store_dword v0, off, s[0:3], 0 ; GFX6-NEXT: s_endpgm ; ; GFX9-LABEL: sdiv_i32: @@ -360,36 +357,35 @@ define amdgpu_kernel void @srem_i32(ptr addrspace(1) %out, i32 %x, i32 %y) { ; GFX6-LABEL: srem_i32: ; GFX6: ; %bb.0: ; GFX6-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9 -; GFX6-NEXT: s_mov_b32 s7, 0xf000 -; GFX6-NEXT: s_mov_b32 s6, -1 ; GFX6-NEXT: s_waitcnt lgkmcnt(0) -; GFX6-NEXT: s_abs_i32 s3, s3 -; GFX6-NEXT: v_cvt_f32_u32_e32 v0, s3 -; GFX6-NEXT: s_sub_i32 s4, 0, s3 -; GFX6-NEXT: s_abs_i32 s8, s2 -; GFX6-NEXT: s_mov_b32 s5, s1 +; GFX6-NEXT: s_mov_b64 s[4:5], s[2:3] +; GFX6-NEXT: s_abs_i32 s5, s5 +; GFX6-NEXT: v_cvt_f32_u32_e32 v0, s5 +; GFX6-NEXT: s_sub_i32 s2, 0, s5 +; GFX6-NEXT: s_abs_i32 s6, s4 +; GFX6-NEXT: s_ashr_i32 s4, s4, 31 ; GFX6-NEXT: v_rcp_iflag_f32_e32 v0, v0 +; GFX6-NEXT: s_mov_b32 s3, 0xf000 ; GFX6-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0 ; GFX6-NEXT: v_cvt_u32_f32_e32 v0, v0 -; GFX6-NEXT: v_mul_lo_u32 v1, s4, v0 -; GFX6-NEXT: s_mov_b32 s4, s0 -; GFX6-NEXT: s_ashr_i32 s0, s2, 31 +; GFX6-NEXT: v_mul_lo_u32 v1, s2, v0 +; GFX6-NEXT: s_mov_b32 s2, -1 ; GFX6-NEXT: v_mul_hi_u32 v1, v0, v1 ; GFX6-NEXT: v_add_i32_e32 v0, vcc, v0, v1 -; GFX6-NEXT: v_mul_hi_u32 v0, s8, v0 -; GFX6-NEXT: v_readfirstlane_b32 s1, v0 -; GFX6-NEXT: s_mul_i32 s1, s1, s3 -; GFX6-NEXT: s_sub_i32 s1, s8, s1 -; GFX6-NEXT: s_sub_i32 s2, s1, s3 -; GFX6-NEXT: s_cmp_ge_u32 s1, s3 -; GFX6-NEXT: s_cselect_b32 s1, s2, s1 -; GFX6-NEXT: s_sub_i32 s2, s1, s3 -; GFX6-NEXT: s_cmp_ge_u32 s1, s3 -; GFX6-NEXT: s_cselect_b32 s1, s2, s1 -; GFX6-NEXT: s_xor_b32 s1, s1, s0 -; GFX6-NEXT: s_sub_i32 s0, s1, s0 -; 
GFX6-NEXT: v_mov_b32_e32 v0, s0 -; GFX6-NEXT: buffer_store_dword v0, off, s[4:7], 0 +; GFX6-NEXT: v_mul_hi_u32 v0, s6, v0 +; GFX6-NEXT: v_readfirstlane_b32 s7, v0 +; GFX6-NEXT: s_mul_i32 s7, s7, s5 +; GFX6-NEXT: s_sub_i32 s6, s6, s7 +; GFX6-NEXT: s_sub_i32 s7, s6, s5 +; GFX6-NEXT: s_cmp_ge_u32 s6, s5 +; GFX6-NEXT: s_cselect_b32 s6, s7, s6 +; GFX6-NEXT: s_sub_i32 s7, s6, s5 +; GFX6-NEXT: s_cmp_ge_u32 s6, s5 +; GFX6-NEXT: s_cselect_b32 s5, s7, s6 +; GFX6-NEXT: s_xor_b32 s5, s5, s4 +; GFX6-NEXT: s_sub_i32 s4, s5, s4 +; GFX6-NEXT: v_mov_b32_e32 v0, s4 +; GFX6-NEXT: buffer_store_dword v0, off, s[0:3], 0 ; GFX6-NEXT: s_endpgm ; ; GFX9-LABEL: srem_i32: @@ -5462,15 +5458,14 @@ define amdgpu_kernel void @udiv_i32_pow2_shl_denom(ptr addrspace(1) %out, i32 %x ; GFX6-LABEL: udiv_i32_pow2_shl_denom: ; GFX6: ; %bb.0: ; GFX6-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9 -; GFX6-NEXT: s_mov_b32 s7, 0xf000 -; GFX6-NEXT: s_mov_b32 s6, -1 ; GFX6-NEXT: s_waitcnt lgkmcnt(0) -; GFX6-NEXT: s_mov_b32 s4, s0 -; GFX6-NEXT: s_add_i32 s0, s3, 12 -; GFX6-NEXT: s_lshr_b32 s0, s2, s0 -; GFX6-NEXT: s_mov_b32 s5, s1 -; GFX6-NEXT: v_mov_b32_e32 v0, s0 -; GFX6-NEXT: buffer_store_dword v0, off, s[4:7], 0 +; GFX6-NEXT: s_mov_b64 s[4:5], s[2:3] +; GFX6-NEXT: s_add_i32 s5, s5, 12 +; GFX6-NEXT: s_lshr_b32 s4, s4, s5 +; GFX6-NEXT: s_mov_b32 s3, 0xf000 +; GFX6-NEXT: s_mov_b32 s2, -1 +; GFX6-NEXT: v_mov_b32_e32 v0, s4 +; GFX6-NEXT: buffer_store_dword v0, off, s[0:3], 0 ; GFX6-NEXT: s_endpgm ; ; GFX9-LABEL: udiv_i32_pow2_shl_denom: @@ -5503,16 +5498,15 @@ define amdgpu_kernel void @udiv_v2i32_pow2k_denom(ptr addrspace(1) %out, <2 x i3 ; GFX6-LABEL: udiv_v2i32_pow2k_denom: ; GFX6: ; %bb.0: ; GFX6-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9 -; GFX6-NEXT: s_mov_b32 s7, 0xf000 -; GFX6-NEXT: s_mov_b32 s6, -1 ; GFX6-NEXT: s_waitcnt lgkmcnt(0) -; GFX6-NEXT: s_mov_b32 s4, s0 -; GFX6-NEXT: s_mov_b32 s5, s1 -; GFX6-NEXT: s_lshr_b32 s0, s2, 12 -; GFX6-NEXT: s_lshr_b32 s1, s3, 12 -; GFX6-NEXT: v_mov_b32_e32 v0, s0 -; GFX6-NEXT: v_mov_b32_e32 v1, s1 -; GFX6-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0 +; GFX6-NEXT: s_mov_b64 s[4:5], s[2:3] +; GFX6-NEXT: s_lshr_b32 s4, s4, 12 +; GFX6-NEXT: s_lshr_b32 s5, s5, 12 +; GFX6-NEXT: s_mov_b32 s3, 0xf000 +; GFX6-NEXT: s_mov_b32 s2, -1 +; GFX6-NEXT: v_mov_b32_e32 v0, s4 +; GFX6-NEXT: v_mov_b32_e32 v1, s5 +; GFX6-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0 ; GFX6-NEXT: s_endpgm ; ; GFX9-LABEL: udiv_v2i32_pow2k_denom: @@ -5546,19 +5540,18 @@ define amdgpu_kernel void @udiv_v2i32_mixed_pow2k_denom(ptr addrspace(1) %out, < ; GFX6: ; %bb.0: ; GFX6-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9 ; GFX6-NEXT: v_mov_b32_e32 v0, 0x100101 -; GFX6-NEXT: s_mov_b32 s7, 0xf000 -; GFX6-NEXT: s_mov_b32 s6, -1 ; GFX6-NEXT: s_waitcnt lgkmcnt(0) -; GFX6-NEXT: v_mul_hi_u32 v0, s3, v0 -; GFX6-NEXT: s_mov_b32 s4, s0 -; GFX6-NEXT: s_lshr_b32 s0, s2, 12 -; GFX6-NEXT: s_mov_b32 s5, s1 -; GFX6-NEXT: v_sub_i32_e32 v1, vcc, s3, v0 +; GFX6-NEXT: s_mov_b64 s[4:5], s[2:3] +; GFX6-NEXT: v_mul_hi_u32 v0, s5, v0 +; GFX6-NEXT: s_lshr_b32 s4, s4, 12 +; GFX6-NEXT: s_mov_b32 s3, 0xf000 +; GFX6-NEXT: s_mov_b32 s2, -1 +; GFX6-NEXT: v_sub_i32_e32 v1, vcc, s5, v0 ; GFX6-NEXT: v_lshrrev_b32_e32 v1, 1, v1 ; GFX6-NEXT: v_add_i32_e32 v0, vcc, v1, v0 ; GFX6-NEXT: v_lshrrev_b32_e32 v1, 11, v0 -; GFX6-NEXT: v_mov_b32_e32 v0, s0 -; GFX6-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0 +; GFX6-NEXT: v_mov_b32_e32 v0, s4 +; GFX6-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0 ; GFX6-NEXT: s_endpgm ; ; GFX9-LABEL: udiv_v2i32_mixed_pow2k_denom: @@ -5855,16 
+5848,15 @@ define amdgpu_kernel void @urem_i32_pow2_shl_denom(ptr addrspace(1) %out, i32 %x ; GFX6-LABEL: urem_i32_pow2_shl_denom: ; GFX6: ; %bb.0: ; GFX6-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9 -; GFX6-NEXT: s_mov_b32 s7, 0xf000 -; GFX6-NEXT: s_mov_b32 s6, -1 ; GFX6-NEXT: s_waitcnt lgkmcnt(0) -; GFX6-NEXT: s_mov_b32 s4, s0 -; GFX6-NEXT: s_lshl_b32 s0, 0x1000, s3 -; GFX6-NEXT: s_add_i32 s0, s0, -1 -; GFX6-NEXT: s_and_b32 s0, s2, s0 -; GFX6-NEXT: s_mov_b32 s5, s1 -; GFX6-NEXT: v_mov_b32_e32 v0, s0 -; GFX6-NEXT: buffer_store_dword v0, off, s[4:7], 0 +; GFX6-NEXT: s_mov_b64 s[4:5], s[2:3] +; GFX6-NEXT: s_lshl_b32 s5, 0x1000, s5 +; GFX6-NEXT: s_add_i32 s5, s5, -1 +; GFX6-NEXT: s_and_b32 s4, s4, s5 +; GFX6-NEXT: s_mov_b32 s3, 0xf000 +; GFX6-NEXT: s_mov_b32 s2, -1 +; GFX6-NEXT: v_mov_b32_e32 v0, s4 +; GFX6-NEXT: buffer_store_dword v0, off, s[0:3], 0 ; GFX6-NEXT: s_endpgm ; ; GFX9-LABEL: urem_i32_pow2_shl_denom: @@ -5898,16 +5890,15 @@ define amdgpu_kernel void @urem_v2i32_pow2k_denom(ptr addrspace(1) %out, <2 x i3 ; GFX6-LABEL: urem_v2i32_pow2k_denom: ; GFX6: ; %bb.0: ; GFX6-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9 -; GFX6-NEXT: s_mov_b32 s7, 0xf000 -; GFX6-NEXT: s_mov_b32 s6, -1 ; GFX6-NEXT: s_waitcnt lgkmcnt(0) -; GFX6-NEXT: s_mov_b32 s4, s0 -; GFX6-NEXT: s_mov_b32 s5, s1 -; GFX6-NEXT: s_and_b32 s0, s2, 0xfff -; GFX6-NEXT: s_and_b32 s1, s3, 0xfff -; GFX6-NEXT: v_mov_b32_e32 v0, s0 -; GFX6-NEXT: v_mov_b32_e32 v1, s1 -; GFX6-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0 +; GFX6-NEXT: s_mov_b64 s[4:5], s[2:3] +; GFX6-NEXT: s_and_b32 s4, s4, 0xfff +; GFX6-NEXT: s_and_b32 s5, s5, 0xfff +; GFX6-NEXT: s_mov_b32 s3, 0xf000 +; GFX6-NEXT: s_mov_b32 s2, -1 +; GFX6-NEXT: v_mov_b32_e32 v0, s4 +; GFX6-NEXT: v_mov_b32_e32 v1, s5 +; GFX6-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0 ; GFX6-NEXT: s_endpgm ; ; GFX9-LABEL: urem_v2i32_pow2k_denom: @@ -6187,41 +6178,40 @@ define amdgpu_kernel void @sdiv_i32_pow2_shl_denom(ptr addrspace(1) %out, i32 %x ; GFX6-LABEL: sdiv_i32_pow2_shl_denom: ; GFX6: ; %bb.0: ; GFX6-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9 -; GFX6-NEXT: s_mov_b32 s7, 0xf000 -; GFX6-NEXT: s_mov_b32 s6, -1 ; GFX6-NEXT: s_waitcnt lgkmcnt(0) -; GFX6-NEXT: s_lshl_b32 s3, 0x1000, s3 -; GFX6-NEXT: s_abs_i32 s8, s3 -; GFX6-NEXT: v_cvt_f32_u32_e32 v0, s8 -; GFX6-NEXT: s_sub_i32 s4, 0, s8 -; GFX6-NEXT: s_abs_i32 s9, s2 -; GFX6-NEXT: s_mov_b32 s5, s1 +; GFX6-NEXT: s_mov_b64 s[4:5], s[2:3] +; GFX6-NEXT: s_lshl_b32 s5, 0x1000, s5 +; GFX6-NEXT: s_abs_i32 s6, s5 +; GFX6-NEXT: v_cvt_f32_u32_e32 v0, s6 +; GFX6-NEXT: s_sub_i32 s2, 0, s6 +; GFX6-NEXT: s_abs_i32 s7, s4 +; GFX6-NEXT: s_mov_b32 s3, 0xf000 ; GFX6-NEXT: v_rcp_iflag_f32_e32 v0, v0 ; GFX6-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0 ; GFX6-NEXT: v_cvt_u32_f32_e32 v0, v0 -; GFX6-NEXT: v_mul_lo_u32 v1, s4, v0 -; GFX6-NEXT: s_mov_b32 s4, s0 +; GFX6-NEXT: v_mul_lo_u32 v1, s2, v0 +; GFX6-NEXT: s_mov_b32 s2, -1 ; GFX6-NEXT: v_mul_hi_u32 v1, v0, v1 ; GFX6-NEXT: v_add_i32_e32 v0, vcc, v0, v1 -; GFX6-NEXT: v_mul_hi_u32 v0, s9, v0 -; GFX6-NEXT: v_readfirstlane_b32 s0, v0 -; GFX6-NEXT: s_mul_i32 s0, s0, s8 -; GFX6-NEXT: s_sub_i32 s0, s9, s0 -; GFX6-NEXT: s_sub_i32 s1, s0, s8 +; GFX6-NEXT: v_mul_hi_u32 v0, s7, v0 +; GFX6-NEXT: v_readfirstlane_b32 s8, v0 +; GFX6-NEXT: s_mul_i32 s8, s8, s6 +; GFX6-NEXT: s_sub_i32 s7, s7, s8 +; GFX6-NEXT: s_sub_i32 s8, s7, s6 ; GFX6-NEXT: v_add_i32_e32 v1, vcc, 1, v0 -; GFX6-NEXT: s_cmp_ge_u32 s0, s8 +; GFX6-NEXT: s_cmp_ge_u32 s7, s6 ; GFX6-NEXT: s_cselect_b64 vcc, -1, 0 -; GFX6-NEXT: s_cselect_b32 s0, s1, s0 +; GFX6-NEXT: s_cselect_b32 
s7, s8, s7 ; GFX6-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc ; GFX6-NEXT: v_add_i32_e32 v1, vcc, 1, v0 -; GFX6-NEXT: s_cmp_ge_u32 s0, s8 +; GFX6-NEXT: s_cmp_ge_u32 s7, s6 ; GFX6-NEXT: s_cselect_b64 vcc, -1, 0 -; GFX6-NEXT: s_xor_b32 s0, s2, s3 +; GFX6-NEXT: s_xor_b32 s4, s4, s5 ; GFX6-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc -; GFX6-NEXT: s_ashr_i32 s0, s0, 31 -; GFX6-NEXT: v_xor_b32_e32 v0, s0, v0 -; GFX6-NEXT: v_subrev_i32_e32 v0, vcc, s0, v0 -; GFX6-NEXT: buffer_store_dword v0, off, s[4:7], 0 +; GFX6-NEXT: s_ashr_i32 s4, s4, 31 +; GFX6-NEXT: v_xor_b32_e32 v0, s4, v0 +; GFX6-NEXT: v_subrev_i32_e32 v0, vcc, s4, v0 +; GFX6-NEXT: buffer_store_dword v0, off, s[0:3], 0 ; GFX6-NEXT: s_endpgm ; ; GFX9-LABEL: sdiv_i32_pow2_shl_denom: @@ -6279,22 +6269,21 @@ define amdgpu_kernel void @sdiv_v2i32_pow2k_denom(ptr addrspace(1) %out, <2 x i3 ; GFX6-LABEL: sdiv_v2i32_pow2k_denom: ; GFX6: ; %bb.0: ; GFX6-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9 -; GFX6-NEXT: s_mov_b32 s7, 0xf000 -; GFX6-NEXT: s_mov_b32 s6, -1 ; GFX6-NEXT: s_waitcnt lgkmcnt(0) -; GFX6-NEXT: s_mov_b32 s4, s0 -; GFX6-NEXT: s_mov_b32 s5, s1 -; GFX6-NEXT: s_ashr_i32 s0, s2, 31 -; GFX6-NEXT: s_ashr_i32 s1, s3, 31 -; GFX6-NEXT: s_lshr_b32 s0, s0, 20 -; GFX6-NEXT: s_lshr_b32 s1, s1, 20 -; GFX6-NEXT: s_add_i32 s0, s2, s0 -; GFX6-NEXT: s_add_i32 s1, s3, s1 -; GFX6-NEXT: s_ashr_i32 s0, s0, 12 -; GFX6-NEXT: s_ashr_i32 s1, s1, 12 -; GFX6-NEXT: v_mov_b32_e32 v0, s0 -; GFX6-NEXT: v_mov_b32_e32 v1, s1 -; GFX6-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0 +; GFX6-NEXT: s_mov_b64 s[4:5], s[2:3] +; GFX6-NEXT: s_ashr_i32 s6, s4, 31 +; GFX6-NEXT: s_ashr_i32 s7, s5, 31 +; GFX6-NEXT: s_lshr_b32 s6, s6, 20 +; GFX6-NEXT: s_lshr_b32 s7, s7, 20 +; GFX6-NEXT: s_add_i32 s4, s4, s6 +; GFX6-NEXT: s_add_i32 s5, s5, s7 +; GFX6-NEXT: s_ashr_i32 s4, s4, 12 +; GFX6-NEXT: s_ashr_i32 s5, s5, 12 +; GFX6-NEXT: s_mov_b32 s3, 0xf000 +; GFX6-NEXT: s_mov_b32 s2, -1 +; GFX6-NEXT: v_mov_b32_e32 v0, s4 +; GFX6-NEXT: v_mov_b32_e32 v1, s5 +; GFX6-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0 ; GFX6-NEXT: s_endpgm ; ; GFX9-LABEL: sdiv_v2i32_pow2k_denom: @@ -6334,22 +6323,21 @@ define amdgpu_kernel void @ssdiv_v2i32_mixed_pow2k_denom(ptr addrspace(1) %out, ; GFX6: ; %bb.0: ; GFX6-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9 ; GFX6-NEXT: v_mov_b32_e32 v0, 0x80080081 -; GFX6-NEXT: s_mov_b32 s7, 0xf000 -; GFX6-NEXT: s_mov_b32 s6, -1 ; GFX6-NEXT: s_waitcnt lgkmcnt(0) -; GFX6-NEXT: v_mul_hi_i32 v0, s3, v0 -; GFX6-NEXT: s_mov_b32 s4, s0 -; GFX6-NEXT: s_ashr_i32 s0, s2, 31 -; GFX6-NEXT: s_lshr_b32 s0, s0, 20 -; GFX6-NEXT: s_add_i32 s0, s2, s0 -; GFX6-NEXT: v_add_i32_e32 v0, vcc, s3, v0 -; GFX6-NEXT: s_ashr_i32 s0, s0, 12 +; GFX6-NEXT: s_mov_b64 s[4:5], s[2:3] +; GFX6-NEXT: v_mul_hi_i32 v0, s5, v0 +; GFX6-NEXT: s_ashr_i32 s6, s4, 31 +; GFX6-NEXT: s_lshr_b32 s6, s6, 20 +; GFX6-NEXT: s_add_i32 s4, s4, s6 +; GFX6-NEXT: v_add_i32_e32 v0, vcc, s5, v0 +; GFX6-NEXT: s_ashr_i32 s4, s4, 12 ; GFX6-NEXT: v_lshrrev_b32_e32 v1, 31, v0 ; GFX6-NEXT: v_ashrrev_i32_e32 v0, 11, v0 -; GFX6-NEXT: s_mov_b32 s5, s1 +; GFX6-NEXT: s_mov_b32 s3, 0xf000 +; GFX6-NEXT: s_mov_b32 s2, -1 ; GFX6-NEXT: v_add_i32_e32 v1, vcc, v0, v1 -; GFX6-NEXT: v_mov_b32_e32 v0, s0 -; GFX6-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0 +; GFX6-NEXT: v_mov_b32_e32 v0, s4 +; GFX6-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0 ; GFX6-NEXT: s_endpgm ; ; GFX9-LABEL: ssdiv_v2i32_mixed_pow2k_denom: @@ -6700,37 +6688,36 @@ define amdgpu_kernel void @srem_i32_pow2_shl_denom(ptr addrspace(1) %out, i32 %x ; GFX6-LABEL: srem_i32_pow2_shl_denom: ; 
GFX6: ; %bb.0: ; GFX6-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9 -; GFX6-NEXT: s_mov_b32 s7, 0xf000 -; GFX6-NEXT: s_mov_b32 s6, -1 ; GFX6-NEXT: s_waitcnt lgkmcnt(0) -; GFX6-NEXT: s_lshl_b32 s3, 0x1000, s3 -; GFX6-NEXT: s_abs_i32 s3, s3 -; GFX6-NEXT: v_cvt_f32_u32_e32 v0, s3 -; GFX6-NEXT: s_sub_i32 s4, 0, s3 -; GFX6-NEXT: s_abs_i32 s8, s2 -; GFX6-NEXT: s_mov_b32 s5, s1 +; GFX6-NEXT: s_mov_b64 s[4:5], s[2:3] +; GFX6-NEXT: s_lshl_b32 s2, 0x1000, s5 +; GFX6-NEXT: s_abs_i32 s5, s2 +; GFX6-NEXT: v_cvt_f32_u32_e32 v0, s5 +; GFX6-NEXT: s_sub_i32 s2, 0, s5 +; GFX6-NEXT: s_abs_i32 s6, s4 +; GFX6-NEXT: s_mov_b32 s3, 0xf000 ; GFX6-NEXT: v_rcp_iflag_f32_e32 v0, v0 ; GFX6-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0 ; GFX6-NEXT: v_cvt_u32_f32_e32 v0, v0 -; GFX6-NEXT: v_mul_lo_u32 v1, s4, v0 -; GFX6-NEXT: s_mov_b32 s4, s0 +; GFX6-NEXT: v_mul_lo_u32 v1, s2, v0 +; GFX6-NEXT: s_mov_b32 s2, -1 ; GFX6-NEXT: v_mul_hi_u32 v1, v0, v1 ; GFX6-NEXT: v_add_i32_e32 v0, vcc, v0, v1 -; GFX6-NEXT: v_mul_hi_u32 v0, s8, v0 -; GFX6-NEXT: v_readfirstlane_b32 s0, v0 -; GFX6-NEXT: s_mul_i32 s0, s0, s3 -; GFX6-NEXT: s_sub_i32 s0, s8, s0 -; GFX6-NEXT: s_sub_i32 s1, s0, s3 -; GFX6-NEXT: s_cmp_ge_u32 s0, s3 -; GFX6-NEXT: s_cselect_b32 s0, s1, s0 -; GFX6-NEXT: s_sub_i32 s1, s0, s3 -; GFX6-NEXT: s_cmp_ge_u32 s0, s3 -; GFX6-NEXT: s_cselect_b32 s0, s1, s0 -; GFX6-NEXT: s_ashr_i32 s1, s2, 31 -; GFX6-NEXT: s_xor_b32 s0, s0, s1 -; GFX6-NEXT: s_sub_i32 s0, s0, s1 -; GFX6-NEXT: v_mov_b32_e32 v0, s0 -; GFX6-NEXT: buffer_store_dword v0, off, s[4:7], 0 +; GFX6-NEXT: v_mul_hi_u32 v0, s6, v0 +; GFX6-NEXT: v_readfirstlane_b32 s7, v0 +; GFX6-NEXT: s_mul_i32 s7, s7, s5 +; GFX6-NEXT: s_sub_i32 s6, s6, s7 +; GFX6-NEXT: s_sub_i32 s7, s6, s5 +; GFX6-NEXT: s_cmp_ge_u32 s6, s5 +; GFX6-NEXT: s_cselect_b32 s6, s7, s6 +; GFX6-NEXT: s_sub_i32 s7, s6, s5 +; GFX6-NEXT: s_cmp_ge_u32 s6, s5 +; GFX6-NEXT: s_cselect_b32 s5, s7, s6 +; GFX6-NEXT: s_ashr_i32 s4, s4, 31 +; GFX6-NEXT: s_xor_b32 s5, s5, s4 +; GFX6-NEXT: s_sub_i32 s4, s5, s4 +; GFX6-NEXT: v_mov_b32_e32 v0, s4 +; GFX6-NEXT: buffer_store_dword v0, off, s[0:3], 0 ; GFX6-NEXT: s_endpgm ; ; GFX9-LABEL: srem_i32_pow2_shl_denom: @@ -6785,24 +6772,23 @@ define amdgpu_kernel void @srem_v2i32_pow2k_denom(ptr addrspace(1) %out, <2 x i3 ; GFX6-LABEL: srem_v2i32_pow2k_denom: ; GFX6: ; %bb.0: ; GFX6-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9 -; GFX6-NEXT: s_mov_b32 s7, 0xf000 -; GFX6-NEXT: s_mov_b32 s6, -1 ; GFX6-NEXT: s_waitcnt lgkmcnt(0) -; GFX6-NEXT: s_mov_b32 s4, s0 -; GFX6-NEXT: s_mov_b32 s5, s1 -; GFX6-NEXT: s_ashr_i32 s0, s2, 31 -; GFX6-NEXT: s_ashr_i32 s1, s3, 31 -; GFX6-NEXT: s_lshr_b32 s0, s0, 20 -; GFX6-NEXT: s_lshr_b32 s1, s1, 20 -; GFX6-NEXT: s_add_i32 s0, s2, s0 -; GFX6-NEXT: s_add_i32 s1, s3, s1 -; GFX6-NEXT: s_and_b32 s0, s0, 0xfffff000 -; GFX6-NEXT: s_and_b32 s1, s1, 0xfffff000 -; GFX6-NEXT: s_sub_i32 s0, s2, s0 -; GFX6-NEXT: s_sub_i32 s1, s3, s1 -; GFX6-NEXT: v_mov_b32_e32 v0, s0 -; GFX6-NEXT: v_mov_b32_e32 v1, s1 -; GFX6-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0 +; GFX6-NEXT: s_mov_b64 s[4:5], s[2:3] +; GFX6-NEXT: s_ashr_i32 s6, s4, 31 +; GFX6-NEXT: s_lshr_b32 s6, s6, 20 +; GFX6-NEXT: s_ashr_i32 s7, s5, 31 +; GFX6-NEXT: s_add_i32 s6, s4, s6 +; GFX6-NEXT: s_lshr_b32 s7, s7, 20 +; GFX6-NEXT: s_and_b32 s6, s6, 0xfffff000 +; GFX6-NEXT: s_sub_i32 s4, s4, s6 +; GFX6-NEXT: s_add_i32 s6, s5, s7 +; GFX6-NEXT: s_and_b32 s6, s6, 0xfffff000 +; GFX6-NEXT: s_sub_i32 s5, s5, s6 +; GFX6-NEXT: s_mov_b32 s3, 0xf000 +; GFX6-NEXT: s_mov_b32 s2, -1 +; GFX6-NEXT: v_mov_b32_e32 v0, s4 +; GFX6-NEXT: v_mov_b32_e32 v1, s5 +; 
GFX6-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0 ; GFX6-NEXT: s_endpgm ; ; GFX9-LABEL: srem_v2i32_pow2k_denom: diff --git a/llvm/test/CodeGen/AMDGPU/and.ll b/llvm/test/CodeGen/AMDGPU/and.ll index 29bfc253e2e7e..fe9ec8e6ef52a 100644 --- a/llvm/test/CodeGen/AMDGPU/and.ll +++ b/llvm/test/CodeGen/AMDGPU/and.ll @@ -123,27 +123,25 @@ define amdgpu_kernel void @s_and_i32(ptr addrspace(1) %out, i32 %a, i32 %b) { ; GFX6-LABEL: s_and_i32: ; GFX6: ; %bb.0: ; GFX6-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9 -; GFX6-NEXT: s_mov_b32 s7, 0xf000 -; GFX6-NEXT: s_mov_b32 s6, -1 ; GFX6-NEXT: s_waitcnt lgkmcnt(0) -; GFX6-NEXT: s_mov_b32 s4, s0 -; GFX6-NEXT: s_and_b32 s0, s2, s3 -; GFX6-NEXT: s_mov_b32 s5, s1 -; GFX6-NEXT: v_mov_b32_e32 v0, s0 -; GFX6-NEXT: buffer_store_dword v0, off, s[4:7], 0 +; GFX6-NEXT: s_mov_b64 s[4:5], s[2:3] +; GFX6-NEXT: s_and_b32 s4, s4, s5 +; GFX6-NEXT: s_mov_b32 s3, 0xf000 +; GFX6-NEXT: s_mov_b32 s2, -1 +; GFX6-NEXT: v_mov_b32_e32 v0, s4 +; GFX6-NEXT: buffer_store_dword v0, off, s[0:3], 0 ; GFX6-NEXT: s_endpgm ; ; GFX8-LABEL: s_and_i32: ; GFX8: ; %bb.0: ; GFX8-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24 -; GFX8-NEXT: s_mov_b32 s7, 0xf000 -; GFX8-NEXT: s_mov_b32 s6, -1 ; GFX8-NEXT: s_waitcnt lgkmcnt(0) -; GFX8-NEXT: s_mov_b32 s4, s0 -; GFX8-NEXT: s_and_b32 s0, s2, s3 -; GFX8-NEXT: s_mov_b32 s5, s1 -; GFX8-NEXT: v_mov_b32_e32 v0, s0 -; GFX8-NEXT: buffer_store_dword v0, off, s[4:7], 0 +; GFX8-NEXT: s_mov_b64 s[4:5], s[2:3] +; GFX8-NEXT: s_and_b32 s4, s4, s5 +; GFX8-NEXT: s_mov_b32 s3, 0xf000 +; GFX8-NEXT: s_mov_b32 s2, -1 +; GFX8-NEXT: v_mov_b32_e32 v0, s4 +; GFX8-NEXT: buffer_store_dword v0, off, s[0:3], 0 ; GFX8-NEXT: s_endpgm %and = and i32 %a, %b store i32 %and, ptr addrspace(1) %out, align 4 @@ -189,36 +187,34 @@ define amdgpu_kernel void @s_and_multi_use_constant_i32_0(ptr addrspace(1) %out, ; GFX6-LABEL: s_and_multi_use_constant_i32_0: ; GFX6: ; %bb.0: ; GFX6-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9 -; GFX6-NEXT: s_mov_b32 s7, 0xf000 -; GFX6-NEXT: s_mov_b32 s6, -1 -; GFX6-NEXT: s_waitcnt lgkmcnt(0) -; GFX6-NEXT: s_mov_b32 s4, s0 -; GFX6-NEXT: s_and_b32 s0, s2, 0x12d687 -; GFX6-NEXT: s_add_i32 s0, s0, s3 -; GFX6-NEXT: s_mov_b32 s5, s1 -; GFX6-NEXT: v_mov_b32_e32 v0, s0 -; GFX6-NEXT: buffer_store_dword v0, off, s[4:7], 0 -; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; GFX6-NEXT: v_mov_b32_e32 v0, 0x12d687 -; GFX6-NEXT: buffer_store_dword v0, off, s[4:7], 0 +; GFX6-NEXT: s_waitcnt lgkmcnt(0) +; GFX6-NEXT: s_mov_b64 s[4:5], s[2:3] +; GFX6-NEXT: s_and_b32 s4, s4, 0x12d687 +; GFX6-NEXT: s_add_i32 s4, s4, s5 +; GFX6-NEXT: s_mov_b32 s3, 0xf000 +; GFX6-NEXT: s_mov_b32 s2, -1 +; GFX6-NEXT: v_mov_b32_e32 v1, s4 +; GFX6-NEXT: buffer_store_dword v1, off, s[0:3], 0 +; GFX6-NEXT: s_waitcnt vmcnt(0) +; GFX6-NEXT: buffer_store_dword v0, off, s[0:3], 0 ; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: s_endpgm ; ; GFX8-LABEL: s_and_multi_use_constant_i32_0: ; GFX8: ; %bb.0: ; GFX8-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24 -; GFX8-NEXT: s_mov_b32 s7, 0xf000 -; GFX8-NEXT: s_mov_b32 s6, -1 +; GFX8-NEXT: v_mov_b32_e32 v0, 0x12d687 ; GFX8-NEXT: s_waitcnt lgkmcnt(0) -; GFX8-NEXT: s_mov_b32 s4, s0 -; GFX8-NEXT: s_and_b32 s0, s2, 0x12d687 -; GFX8-NEXT: s_add_i32 s0, s0, s3 -; GFX8-NEXT: s_mov_b32 s5, s1 -; GFX8-NEXT: v_mov_b32_e32 v0, s0 -; GFX8-NEXT: buffer_store_dword v0, off, s[4:7], 0 +; GFX8-NEXT: s_mov_b64 s[4:5], s[2:3] +; GFX8-NEXT: s_and_b32 s4, s4, 0x12d687 +; GFX8-NEXT: s_add_i32 s4, s4, s5 +; GFX8-NEXT: s_mov_b32 s3, 0xf000 +; GFX8-NEXT: s_mov_b32 s2, -1 +; GFX8-NEXT: v_mov_b32_e32 v1, s4 +; 
GFX8-NEXT: buffer_store_dword v1, off, s[0:3], 0 ; GFX8-NEXT: s_waitcnt vmcnt(0) -; GFX8-NEXT: v_mov_b32_e32 v0, 0x12d687 -; GFX8-NEXT: buffer_store_dword v0, off, s[4:7], 0 +; GFX8-NEXT: buffer_store_dword v0, off, s[0:3], 0 ; GFX8-NEXT: s_waitcnt vmcnt(0) ; GFX8-NEXT: s_endpgm %and = and i32 %a, 1234567 @@ -236,32 +232,30 @@ define amdgpu_kernel void @s_and_multi_use_constant_i32_1(ptr addrspace(1) %out, ; GFX6-LABEL: s_and_multi_use_constant_i32_1: ; GFX6: ; %bb.0: ; GFX6-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9 -; GFX6-NEXT: s_mov_b32 s7, 0xf000 -; GFX6-NEXT: s_mov_b32 s6, -1 ; GFX6-NEXT: s_waitcnt lgkmcnt(0) -; GFX6-NEXT: s_mov_b32 s4, s0 -; GFX6-NEXT: s_and_b32 s0, s2, 0x12d687 -; GFX6-NEXT: s_add_i32 s0, s0, s3 -; GFX6-NEXT: s_add_i32 s0, s0, 0x12d687 -; GFX6-NEXT: s_mov_b32 s5, s1 -; GFX6-NEXT: v_mov_b32_e32 v0, s0 -; GFX6-NEXT: buffer_store_dword v0, off, s[4:7], 0 +; GFX6-NEXT: s_mov_b64 s[4:5], s[2:3] +; GFX6-NEXT: s_and_b32 s4, s4, 0x12d687 +; GFX6-NEXT: s_add_i32 s4, s4, s5 +; GFX6-NEXT: s_add_i32 s4, s4, 0x12d687 +; GFX6-NEXT: s_mov_b32 s3, 0xf000 +; GFX6-NEXT: s_mov_b32 s2, -1 +; GFX6-NEXT: v_mov_b32_e32 v0, s4 +; GFX6-NEXT: buffer_store_dword v0, off, s[0:3], 0 ; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: s_endpgm ; ; GFX8-LABEL: s_and_multi_use_constant_i32_1: ; GFX8: ; %bb.0: ; GFX8-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24 -; GFX8-NEXT: s_mov_b32 s7, 0xf000 -; GFX8-NEXT: s_mov_b32 s6, -1 ; GFX8-NEXT: s_waitcnt lgkmcnt(0) -; GFX8-NEXT: s_mov_b32 s4, s0 -; GFX8-NEXT: s_and_b32 s0, s2, 0x12d687 -; GFX8-NEXT: s_add_i32 s0, s0, s3 -; GFX8-NEXT: s_add_i32 s0, s0, 0x12d687 -; GFX8-NEXT: s_mov_b32 s5, s1 -; GFX8-NEXT: v_mov_b32_e32 v0, s0 -; GFX8-NEXT: buffer_store_dword v0, off, s[4:7], 0 +; GFX8-NEXT: s_mov_b64 s[4:5], s[2:3] +; GFX8-NEXT: s_and_b32 s4, s4, 0x12d687 +; GFX8-NEXT: s_add_i32 s4, s4, s5 +; GFX8-NEXT: s_add_i32 s4, s4, 0x12d687 +; GFX8-NEXT: s_mov_b32 s3, 0xf000 +; GFX8-NEXT: s_mov_b32 s2, -1 +; GFX8-NEXT: v_mov_b32_e32 v0, s4 +; GFX8-NEXT: buffer_store_dword v0, off, s[0:3], 0 ; GFX8-NEXT: s_waitcnt vmcnt(0) ; GFX8-NEXT: s_endpgm %and = and i32 %a, 1234567 diff --git a/llvm/test/CodeGen/AMDGPU/bfe-patterns.ll b/llvm/test/CodeGen/AMDGPU/bfe-patterns.ll index c14678cafc7a4..c0d5f8a9d1c3b 100644 --- a/llvm/test/CodeGen/AMDGPU/bfe-patterns.ll +++ b/llvm/test/CodeGen/AMDGPU/bfe-patterns.ll @@ -120,17 +120,17 @@ define amdgpu_kernel void @s_ubfe_sub_i32(ptr addrspace(1) %out, i32 %src, i32 % ; SI-LABEL: s_ubfe_sub_i32: ; SI: ; %bb.0: ; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9 -; SI-NEXT: s_mov_b32 s7, 0xf000 -; SI-NEXT: s_mov_b32 s6, 0 ; SI-NEXT: v_lshlrev_b32_e32 v0, 2, v0 -; SI-NEXT: s_waitcnt lgkmcnt(0) -; SI-NEXT: s_sub_i32 s3, 32, s3 -; SI-NEXT: s_lshl_b32 s2, s2, s3 -; SI-NEXT: s_lshr_b32 s2, s2, s3 ; SI-NEXT: v_mov_b32_e32 v1, 0 -; SI-NEXT: s_mov_b64 s[4:5], s[0:1] -; SI-NEXT: v_mov_b32_e32 v2, s2 -; SI-NEXT: buffer_store_dword v2, v[0:1], s[4:7], 0 addr64 +; SI-NEXT: s_waitcnt lgkmcnt(0) +; SI-NEXT: s_mov_b64 s[4:5], s[2:3] +; SI-NEXT: s_mov_b32 s3, 0xf000 +; SI-NEXT: s_sub_i32 s2, 32, s5 +; SI-NEXT: s_lshl_b32 s4, s4, s2 +; SI-NEXT: s_lshr_b32 s4, s4, s2 +; SI-NEXT: s_mov_b32 s2, 0 +; SI-NEXT: v_mov_b32_e32 v2, s4 +; SI-NEXT: buffer_store_dword v2, v[0:1], s[0:3], 0 addr64 ; SI-NEXT: s_endpgm ; ; VI-LABEL: s_ubfe_sub_i32: @@ -160,20 +160,20 @@ define amdgpu_kernel void @s_ubfe_sub_multi_use_shl_i32(ptr addrspace(1) %out, i ; SI-LABEL: s_ubfe_sub_multi_use_shl_i32: ; SI: ; %bb.0: ; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9 -; SI-NEXT: s_mov_b32 s6, 0 -; 
SI-NEXT: s_mov_b32 s7, 0xf000 ; SI-NEXT: v_lshlrev_b32_e32 v0, 2, v0 -; SI-NEXT: s_waitcnt lgkmcnt(0) -; SI-NEXT: s_sub_i32 s3, 32, s3 -; SI-NEXT: s_lshl_b32 s2, s2, s3 -; SI-NEXT: s_lshr_b32 s3, s2, s3 ; SI-NEXT: v_mov_b32_e32 v1, 0 -; SI-NEXT: s_mov_b64 s[4:5], s[0:1] -; SI-NEXT: v_mov_b32_e32 v2, s3 -; SI-NEXT: buffer_store_dword v2, v[0:1], s[4:7], 0 addr64 -; SI-NEXT: s_mov_b32 s6, -1 -; SI-NEXT: v_mov_b32_e32 v0, s2 -; SI-NEXT: buffer_store_dword v0, off, s[4:7], 0 +; SI-NEXT: s_waitcnt lgkmcnt(0) +; SI-NEXT: s_mov_b64 s[4:5], s[2:3] +; SI-NEXT: s_mov_b32 s2, 0 +; SI-NEXT: s_sub_i32 s3, 32, s5 +; SI-NEXT: s_lshl_b32 s4, s4, s3 +; SI-NEXT: s_lshr_b32 s5, s4, s3 +; SI-NEXT: s_mov_b32 s3, 0xf000 +; SI-NEXT: v_mov_b32_e32 v2, s5 +; SI-NEXT: buffer_store_dword v2, v[0:1], s[0:3], 0 addr64 +; SI-NEXT: s_mov_b32 s2, -1 +; SI-NEXT: v_mov_b32_e32 v0, s4 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: s_endpgm ; @@ -322,17 +322,17 @@ define amdgpu_kernel void @s_sbfe_sub_i32(ptr addrspace(1) %out, i32 %src, i32 % ; SI-LABEL: s_sbfe_sub_i32: ; SI: ; %bb.0: ; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9 -; SI-NEXT: s_mov_b32 s7, 0xf000 -; SI-NEXT: s_mov_b32 s6, 0 ; SI-NEXT: v_lshlrev_b32_e32 v0, 2, v0 -; SI-NEXT: s_waitcnt lgkmcnt(0) -; SI-NEXT: s_sub_i32 s3, 32, s3 -; SI-NEXT: s_lshl_b32 s2, s2, s3 -; SI-NEXT: s_ashr_i32 s2, s2, s3 ; SI-NEXT: v_mov_b32_e32 v1, 0 -; SI-NEXT: s_mov_b64 s[4:5], s[0:1] -; SI-NEXT: v_mov_b32_e32 v2, s2 -; SI-NEXT: buffer_store_dword v2, v[0:1], s[4:7], 0 addr64 +; SI-NEXT: s_waitcnt lgkmcnt(0) +; SI-NEXT: s_mov_b64 s[4:5], s[2:3] +; SI-NEXT: s_mov_b32 s3, 0xf000 +; SI-NEXT: s_sub_i32 s2, 32, s5 +; SI-NEXT: s_lshl_b32 s4, s4, s2 +; SI-NEXT: s_ashr_i32 s4, s4, s2 +; SI-NEXT: s_mov_b32 s2, 0 +; SI-NEXT: v_mov_b32_e32 v2, s4 +; SI-NEXT: buffer_store_dword v2, v[0:1], s[0:3], 0 addr64 ; SI-NEXT: s_endpgm ; ; VI-LABEL: s_sbfe_sub_i32: @@ -362,20 +362,20 @@ define amdgpu_kernel void @s_sbfe_sub_multi_use_shl_i32(ptr addrspace(1) %out, i ; SI-LABEL: s_sbfe_sub_multi_use_shl_i32: ; SI: ; %bb.0: ; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9 -; SI-NEXT: s_mov_b32 s6, 0 -; SI-NEXT: s_mov_b32 s7, 0xf000 ; SI-NEXT: v_lshlrev_b32_e32 v0, 2, v0 -; SI-NEXT: s_waitcnt lgkmcnt(0) -; SI-NEXT: s_sub_i32 s3, 32, s3 -; SI-NEXT: s_lshl_b32 s2, s2, s3 -; SI-NEXT: s_ashr_i32 s3, s2, s3 ; SI-NEXT: v_mov_b32_e32 v1, 0 -; SI-NEXT: s_mov_b64 s[4:5], s[0:1] -; SI-NEXT: v_mov_b32_e32 v2, s3 -; SI-NEXT: buffer_store_dword v2, v[0:1], s[4:7], 0 addr64 -; SI-NEXT: s_mov_b32 s6, -1 -; SI-NEXT: v_mov_b32_e32 v0, s2 -; SI-NEXT: buffer_store_dword v0, off, s[4:7], 0 +; SI-NEXT: s_waitcnt lgkmcnt(0) +; SI-NEXT: s_mov_b64 s[4:5], s[2:3] +; SI-NEXT: s_mov_b32 s2, 0 +; SI-NEXT: s_sub_i32 s3, 32, s5 +; SI-NEXT: s_lshl_b32 s4, s4, s3 +; SI-NEXT: s_ashr_i32 s5, s4, s3 +; SI-NEXT: s_mov_b32 s3, 0xf000 +; SI-NEXT: v_mov_b32_e32 v2, s5 +; SI-NEXT: buffer_store_dword v2, v[0:1], s[0:3], 0 addr64 +; SI-NEXT: s_mov_b32 s2, -1 +; SI-NEXT: v_mov_b32_e32 v0, s4 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: s_endpgm ; diff --git a/llvm/test/CodeGen/AMDGPU/bfi_nested.ll b/llvm/test/CodeGen/AMDGPU/bfi_nested.ll index bd76f34832f0a..7326adae8cbcb 100644 --- a/llvm/test/CodeGen/AMDGPU/bfi_nested.ll +++ b/llvm/test/CodeGen/AMDGPU/bfi_nested.ll @@ -284,16 +284,15 @@ define amdgpu_kernel void @v_bfi_dont_applied_for_scalar_ops(ptr addrspace(1) %o ; GCN-LABEL: v_bfi_dont_applied_for_scalar_ops: ; GCN: ; %bb.0: ; GCN-NEXT: s_load_dwordx4 
s[0:3], s[4:5], 0x9 -; GCN-NEXT: s_mov_b32 s7, 0xf000 ; GCN-NEXT: s_waitcnt lgkmcnt(0) -; GCN-NEXT: s_and_b32 s3, s3, 0xffff0000 -; GCN-NEXT: s_and_b32 s2, s2, 0xffff -; GCN-NEXT: s_or_b32 s2, s2, s3 -; GCN-NEXT: s_mov_b32 s6, -1 -; GCN-NEXT: s_mov_b32 s4, s0 -; GCN-NEXT: s_mov_b32 s5, s1 -; GCN-NEXT: v_mov_b32_e32 v0, s2 -; GCN-NEXT: buffer_store_dword v0, off, s[4:7], 0 +; GCN-NEXT: s_mov_b64 s[4:5], s[2:3] +; GCN-NEXT: s_mov_b32 s3, 0xf000 +; GCN-NEXT: s_and_b32 s2, s5, 0xffff0000 +; GCN-NEXT: s_and_b32 s4, s4, 0xffff +; GCN-NEXT: s_or_b32 s4, s4, s2 +; GCN-NEXT: s_mov_b32 s2, -1 +; GCN-NEXT: v_mov_b32_e32 v0, s4 +; GCN-NEXT: buffer_store_dword v0, off, s[0:3], 0 ; GCN-NEXT: s_endpgm %shift = lshr i32 %b, 16 %tr = trunc i32 %shift to i16 diff --git a/llvm/test/CodeGen/AMDGPU/bfm.ll b/llvm/test/CodeGen/AMDGPU/bfm.ll index a12b5ea4c0c21..172e07f6b792c 100644 --- a/llvm/test/CodeGen/AMDGPU/bfm.ll +++ b/llvm/test/CodeGen/AMDGPU/bfm.ll @@ -6,14 +6,13 @@ define amdgpu_kernel void @s_bfm_pattern(ptr addrspace(1) %out, i32 %x, i32 %y) ; SI-LABEL: s_bfm_pattern: ; SI: ; %bb.0: ; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9 -; SI-NEXT: s_mov_b32 s7, 0xf000 ; SI-NEXT: s_waitcnt lgkmcnt(0) -; SI-NEXT: s_bfm_b32 s2, s2, s3 -; SI-NEXT: s_mov_b32 s6, -1 -; SI-NEXT: s_mov_b32 s4, s0 -; SI-NEXT: s_mov_b32 s5, s1 -; SI-NEXT: v_mov_b32_e32 v0, s2 -; SI-NEXT: buffer_store_dword v0, off, s[4:7], 0 +; SI-NEXT: s_mov_b64 s[4:5], s[2:3] +; SI-NEXT: s_mov_b32 s3, 0xf000 +; SI-NEXT: s_bfm_b32 s4, s4, s5 +; SI-NEXT: s_mov_b32 s2, -1 +; SI-NEXT: v_mov_b32_e32 v0, s4 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0 ; SI-NEXT: s_endpgm ; ; VI-LABEL: s_bfm_pattern: diff --git a/llvm/test/CodeGen/AMDGPU/bitreverse.ll b/llvm/test/CodeGen/AMDGPU/bitreverse.ll index d4f56175d790c..e33b9ab0eda9e 100644 --- a/llvm/test/CodeGen/AMDGPU/bitreverse.ll +++ b/llvm/test/CodeGen/AMDGPU/bitreverse.ll @@ -362,31 +362,29 @@ define amdgpu_kernel void @s_brev_v2i32(ptr addrspace(1) noalias %out, <2 x i32> ; SI-LABEL: s_brev_v2i32: ; SI: ; %bb.0: ; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9 -; SI-NEXT: s_mov_b32 s7, 0xf000 -; SI-NEXT: s_mov_b32 s6, -1 ; SI-NEXT: s_waitcnt lgkmcnt(0) -; SI-NEXT: s_mov_b32 s4, s0 -; SI-NEXT: s_mov_b32 s5, s1 -; SI-NEXT: s_brev_b32 s0, s3 -; SI-NEXT: s_brev_b32 s1, s2 -; SI-NEXT: v_mov_b32_e32 v0, s1 -; SI-NEXT: v_mov_b32_e32 v1, s0 -; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0 +; SI-NEXT: s_mov_b64 s[4:5], s[2:3] +; SI-NEXT: s_brev_b32 s5, s5 +; SI-NEXT: s_brev_b32 s4, s4 +; SI-NEXT: s_mov_b32 s3, 0xf000 +; SI-NEXT: s_mov_b32 s2, -1 +; SI-NEXT: v_mov_b32_e32 v0, s4 +; SI-NEXT: v_mov_b32_e32 v1, s5 +; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0 ; SI-NEXT: s_endpgm ; ; FLAT-LABEL: s_brev_v2i32: ; FLAT: ; %bb.0: ; FLAT-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24 -; FLAT-NEXT: s_mov_b32 s7, 0xf000 -; FLAT-NEXT: s_mov_b32 s6, -1 ; FLAT-NEXT: s_waitcnt lgkmcnt(0) -; FLAT-NEXT: s_mov_b32 s4, s0 -; FLAT-NEXT: s_mov_b32 s5, s1 -; FLAT-NEXT: s_brev_b32 s0, s3 -; FLAT-NEXT: s_brev_b32 s1, s2 -; FLAT-NEXT: v_mov_b32_e32 v0, s1 -; FLAT-NEXT: v_mov_b32_e32 v1, s0 -; FLAT-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0 +; FLAT-NEXT: s_mov_b64 s[4:5], s[2:3] +; FLAT-NEXT: s_brev_b32 s5, s5 +; FLAT-NEXT: s_brev_b32 s4, s4 +; FLAT-NEXT: s_mov_b32 s3, 0xf000 +; FLAT-NEXT: s_mov_b32 s2, -1 +; FLAT-NEXT: v_mov_b32_e32 v0, s4 +; FLAT-NEXT: v_mov_b32_e32 v1, s5 +; FLAT-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0 ; FLAT-NEXT: s_endpgm ; ; GISEL-LABEL: s_brev_v2i32: @@ -405,16 +403,14 @@ define 
amdgpu_kernel void @s_brev_v2i32(ptr addrspace(1) noalias %out, <2 x i32> ; GFX11-FLAT-LABEL: s_brev_v2i32: ; GFX11-FLAT: ; %bb.0: ; GFX11-FLAT-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 -; GFX11-FLAT-NEXT: s_mov_b32 s7, 0x31016000 -; GFX11-FLAT-NEXT: s_mov_b32 s6, -1 ; GFX11-FLAT-NEXT: s_waitcnt lgkmcnt(0) ; GFX11-FLAT-NEXT: s_brev_b32 s2, s2 ; GFX11-FLAT-NEXT: s_brev_b32 s3, s3 ; GFX11-FLAT-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX11-FLAT-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3 -; GFX11-FLAT-NEXT: s_mov_b32 s4, s0 -; GFX11-FLAT-NEXT: s_mov_b32 s5, s1 -; GFX11-FLAT-NEXT: buffer_store_b64 v[0:1], off, s[4:7], 0 +; GFX11-FLAT-NEXT: s_mov_b32 s3, 0x31016000 +; GFX11-FLAT-NEXT: s_mov_b32 s2, -1 +; GFX11-FLAT-NEXT: buffer_store_b64 v[0:1], off, s[0:3], 0 ; GFX11-FLAT-NEXT: s_endpgm ; ; GFX11-GISEL-LABEL: s_brev_v2i32: diff --git a/llvm/test/CodeGen/AMDGPU/build_vector.ll b/llvm/test/CodeGen/AMDGPU/build_vector.ll index 763f436997c21..fbaaef0b29b66 100644 --- a/llvm/test/CodeGen/AMDGPU/build_vector.ll +++ b/llvm/test/CodeGen/AMDGPU/build_vector.ll @@ -255,16 +255,15 @@ define amdgpu_kernel void @build_v2i32_from_v4i16_shuffle(ptr addrspace(1) %out, ; GFX6-LABEL: build_v2i32_from_v4i16_shuffle: ; GFX6: ; %bb.0: ; %entry ; GFX6-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9 -; GFX6-NEXT: s_mov_b32 s7, 0xf000 -; GFX6-NEXT: s_mov_b32 s6, -1 ; GFX6-NEXT: s_waitcnt lgkmcnt(0) -; GFX6-NEXT: s_mov_b32 s4, s0 -; GFX6-NEXT: s_mov_b32 s5, s1 -; GFX6-NEXT: s_lshl_b32 s0, s3, 16 -; GFX6-NEXT: s_lshl_b32 s1, s2, 16 -; GFX6-NEXT: v_mov_b32_e32 v0, s1 -; GFX6-NEXT: v_mov_b32_e32 v1, s0 -; GFX6-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0 +; GFX6-NEXT: s_mov_b64 s[4:5], s[2:3] +; GFX6-NEXT: s_lshl_b32 s5, s5, 16 +; GFX6-NEXT: s_lshl_b32 s4, s4, 16 +; GFX6-NEXT: s_mov_b32 s3, 0xf000 +; GFX6-NEXT: s_mov_b32 s2, -1 +; GFX6-NEXT: v_mov_b32_e32 v0, s4 +; GFX6-NEXT: v_mov_b32_e32 v1, s5 +; GFX6-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0 ; GFX6-NEXT: s_endpgm ; ; GFX8-LABEL: build_v2i32_from_v4i16_shuffle: diff --git a/llvm/test/CodeGen/AMDGPU/combine-cond-add-sub.ll b/llvm/test/CodeGen/AMDGPU/combine-cond-add-sub.ll index 3d315f8a12202..4cbd41c1b1965 100644 --- a/llvm/test/CodeGen/AMDGPU/combine-cond-add-sub.ll +++ b/llvm/test/CodeGen/AMDGPU/combine-cond-add-sub.ll @@ -647,20 +647,20 @@ define amdgpu_kernel void @sub_zext_setcc_commute(ptr addrspace(1) nocapture %ar ; GCN-LABEL: sub_zext_setcc_commute: ; GCN: ; %bb.0: ; %bb ; GCN-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9 -; GCN-NEXT: s_mov_b32 s7, 0xf000 -; GCN-NEXT: s_mov_b32 s6, 0 ; GCN-NEXT: v_lshlrev_b32_e32 v2, 2, v0 -; GCN-NEXT: v_mov_b32_e32 v3, 0 ; GCN-NEXT: s_waitcnt lgkmcnt(0) -; GCN-NEXT: s_mov_b64 s[4:5], s[0:1] -; GCN-NEXT: buffer_load_dword v4, v[2:3], s[4:7], 0 addr64 +; GCN-NEXT: s_mov_b64 s[4:5], s[2:3] +; GCN-NEXT: s_mov_b32 s3, 0xf000 +; GCN-NEXT: s_mov_b32 s2, 0 +; GCN-NEXT: v_mov_b32_e32 v3, 0 +; GCN-NEXT: buffer_load_dword v4, v[2:3], s[0:3], 0 addr64 ; GCN-NEXT: v_cmp_gt_u32_e32 vcc, v0, v1 ; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc ; GCN-NEXT: s_waitcnt vmcnt(0) ; GCN-NEXT: v_sub_i32_e32 v0, vcc, v0, v4 -; GCN-NEXT: v_add_i32_e32 v0, vcc, s2, v0 -; GCN-NEXT: v_subrev_i32_e32 v0, vcc, s3, v0 -; GCN-NEXT: buffer_store_dword v0, v[2:3], s[4:7], 0 addr64 +; GCN-NEXT: v_add_i32_e32 v0, vcc, s4, v0 +; GCN-NEXT: v_subrev_i32_e32 v0, vcc, s5, v0 +; GCN-NEXT: buffer_store_dword v0, v[2:3], s[0:3], 0 addr64 ; GCN-NEXT: s_endpgm ; ; GFX9-LABEL: sub_zext_setcc_commute: @@ -696,20 +696,20 @@ define amdgpu_kernel void 
@sub_sext_setcc_commute(ptr addrspace(1) nocapture %ar ; GCN-LABEL: sub_sext_setcc_commute: ; GCN: ; %bb.0: ; %bb ; GCN-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9 -; GCN-NEXT: s_mov_b32 s7, 0xf000 -; GCN-NEXT: s_mov_b32 s6, 0 ; GCN-NEXT: v_lshlrev_b32_e32 v2, 2, v0 -; GCN-NEXT: v_mov_b32_e32 v3, 0 ; GCN-NEXT: s_waitcnt lgkmcnt(0) -; GCN-NEXT: s_mov_b64 s[4:5], s[0:1] -; GCN-NEXT: buffer_load_dword v4, v[2:3], s[4:7], 0 addr64 +; GCN-NEXT: s_mov_b64 s[4:5], s[2:3] +; GCN-NEXT: s_mov_b32 s3, 0xf000 +; GCN-NEXT: s_mov_b32 s2, 0 +; GCN-NEXT: v_mov_b32_e32 v3, 0 +; GCN-NEXT: buffer_load_dword v4, v[2:3], s[0:3], 0 addr64 ; GCN-NEXT: v_cmp_gt_u32_e32 vcc, v0, v1 ; GCN-NEXT: v_cndmask_b32_e64 v0, 0, -1, vcc ; GCN-NEXT: s_waitcnt vmcnt(0) ; GCN-NEXT: v_sub_i32_e32 v0, vcc, v0, v4 -; GCN-NEXT: v_add_i32_e32 v0, vcc, s2, v0 -; GCN-NEXT: v_subrev_i32_e32 v0, vcc, s3, v0 -; GCN-NEXT: buffer_store_dword v0, v[2:3], s[4:7], 0 addr64 +; GCN-NEXT: v_add_i32_e32 v0, vcc, s4, v0 +; GCN-NEXT: v_subrev_i32_e32 v0, vcc, s5, v0 +; GCN-NEXT: buffer_store_dword v0, v[2:3], s[0:3], 0 addr64 ; GCN-NEXT: s_endpgm ; ; GFX9-LABEL: sub_sext_setcc_commute: diff --git a/llvm/test/CodeGen/AMDGPU/compute-known-bits-nofpclass.ll b/llvm/test/CodeGen/AMDGPU/compute-known-bits-nofpclass.ll new file mode 100644 index 0000000000000..244c3f7c2a96a --- /dev/null +++ b/llvm/test/CodeGen/AMDGPU/compute-known-bits-nofpclass.ll @@ -0,0 +1,46 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6 +; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 < %s | FileCheck %s + +define i32 @known_positive(float nofpclass(nan ninf nzero nsub nnorm) %signbit.zero) #0 { +; CHECK-LABEL: known_positive: +; CHECK: ; %bb.0: +; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; CHECK-NEXT: s_setpc_b64 s[30:31] + %cast = bitcast float %signbit.zero to i32 + %and = and i32 %cast, 2147483647 + ret i32 %and +} + +define i32 @known_positive_maybe_nan(float nofpclass(ninf nzero nsub nnorm) %signbit.zero) #0 { +; CHECK-LABEL: known_positive_maybe_nan: +; CHECK: ; %bb.0: +; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; CHECK-NEXT: v_and_b32_e32 v0, 0x7fffffff, v0 +; CHECK-NEXT: s_setpc_b64 s[30:31] + %cast = bitcast float %signbit.zero to i32 + %and = and i32 %cast, 2147483647 + ret i32 %and +} + +define i32 @known_negative(float nofpclass(nan pinf pzero psub pnorm) %signbit.one) #0 { +; CHECK-LABEL: known_negative: +; CHECK: ; %bb.0: +; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; CHECK-NEXT: s_setpc_b64 s[30:31] + %cast = bitcast float %signbit.one to i32 + %or = or i32 %cast, -2147483648 + ret i32 %or +} + +define i32 @known_negative_maybe_nan(float nofpclass(pinf pzero psub pnorm) %signbit.one) #0 { +; CHECK-LABEL: known_negative_maybe_nan: +; CHECK: ; %bb.0: +; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; CHECK-NEXT: v_or_b32_e32 v0, 0x80000000, v0 +; CHECK-NEXT: s_setpc_b64 s[30:31] + %cast = bitcast float %signbit.one to i32 + %or = or i32 %cast, -2147483648 + ret i32 %or +} + +attributes #0 = { nounwind } diff --git a/llvm/test/CodeGen/AMDGPU/copysign-simplify-demanded-bits.ll b/llvm/test/CodeGen/AMDGPU/copysign-simplify-demanded-bits.ll index f5227eed458d6..285dc7cd4ce7c 100644 --- a/llvm/test/CodeGen/AMDGPU/copysign-simplify-demanded-bits.ll +++ b/llvm/test/CodeGen/AMDGPU/copysign-simplify-demanded-bits.ll @@ -345,15 +345,13 @@ define float @test_copysign_pow_fast_f32__integral_y(float %x, i32 %y.i) { ; GFX9-NEXT: v_cmp_gt_f32_e32 vcc, s4, v3 ; GFX9-NEXT: 
v_cndmask_b32_e32 v3, 0, v4, vcc ; GFX9-NEXT: v_fma_f32 v2, v2, v1, v3 -; GFX9-NEXT: v_cvt_i32_f32_e32 v1, v1 ; GFX9-NEXT: v_exp_f32_e32 v2, v2 +; GFX9-NEXT: v_cvt_i32_f32_e32 v1, v1 ; GFX9-NEXT: v_not_b32_e32 v3, 63 ; GFX9-NEXT: v_cndmask_b32_e32 v3, 0, v3, vcc -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 31, v1 ; GFX9-NEXT: v_ldexp_f32 v2, v2, v3 -; GFX9-NEXT: v_and_b32_e32 v0, v1, v0 -; GFX9-NEXT: s_brev_b32 s4, -2 -; GFX9-NEXT: v_bfi_b32 v0, s4, v2, v0 +; GFX9-NEXT: v_lshlrev_b32_e32 v1, 31, v1 +; GFX9-NEXT: v_and_or_b32 v0, v1, v0, v2 ; GFX9-NEXT: s_setpc_b64 s[30:31] %y = sitofp i32 %y.i to float %y.fptosi = fptosi float %y to i32 @@ -370,4 +368,109 @@ define float @test_copysign_pow_fast_f32__integral_y(float %x, i32 %y.i) { ret float %pow_sign1 } +define double @test_pow_fast_f64integral_y(double %x, i32 %y.i) #0 { +; GFX9-LABEL: test_pow_fast_f64integral_y: +; GFX9: ; %bb.0: +; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT: s_mov_b32 s16, s33 +; GFX9-NEXT: s_mov_b32 s33, s32 +; GFX9-NEXT: s_or_saveexec_b64 s[18:19], -1 +; GFX9-NEXT: buffer_store_dword v43, off, s[0:3], s33 offset:12 ; 4-byte Folded Spill +; GFX9-NEXT: s_mov_b64 exec, s[18:19] +; GFX9-NEXT: v_writelane_b32 v43, s16, 14 +; GFX9-NEXT: s_addk_i32 s32, 0x800 +; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s33 offset:8 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s33 offset:4 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v42, off, s[0:3], s33 ; 4-byte Folded Spill +; GFX9-NEXT: v_writelane_b32 v43, s34, 0 +; GFX9-NEXT: v_writelane_b32 v43, s35, 1 +; GFX9-NEXT: v_writelane_b32 v43, s36, 2 +; GFX9-NEXT: v_writelane_b32 v43, s37, 3 +; GFX9-NEXT: v_writelane_b32 v43, s38, 4 +; GFX9-NEXT: v_writelane_b32 v43, s39, 5 +; GFX9-NEXT: v_writelane_b32 v43, s48, 6 +; GFX9-NEXT: v_writelane_b32 v43, s49, 7 +; GFX9-NEXT: v_writelane_b32 v43, s50, 8 +; GFX9-NEXT: v_writelane_b32 v43, s51, 9 +; GFX9-NEXT: v_writelane_b32 v43, s52, 10 +; GFX9-NEXT: v_writelane_b32 v43, s53, 11 +; GFX9-NEXT: v_writelane_b32 v43, s30, 12 +; GFX9-NEXT: v_writelane_b32 v43, s31, 13 +; GFX9-NEXT: v_mov_b32_e32 v42, v1 +; GFX9-NEXT: v_and_b32_e32 v1, 0x7fffffff, v42 +; GFX9-NEXT: s_getpc_b64 s[16:17] +; GFX9-NEXT: s_add_u32 s16, s16, _Z4log2d@rel32@lo+4 +; GFX9-NEXT: s_addc_u32 s17, s17, _Z4log2d@rel32@hi+12 +; GFX9-NEXT: v_mov_b32_e32 v40, v31 +; GFX9-NEXT: v_mov_b32_e32 v41, v2 +; GFX9-NEXT: s_mov_b32 s50, s15 +; GFX9-NEXT: s_mov_b32 s51, s14 +; GFX9-NEXT: s_mov_b32 s52, s13 +; GFX9-NEXT: s_mov_b32 s53, s12 +; GFX9-NEXT: s_mov_b64 s[34:35], s[10:11] +; GFX9-NEXT: s_mov_b64 s[36:37], s[8:9] +; GFX9-NEXT: s_mov_b64 s[38:39], s[6:7] +; GFX9-NEXT: s_mov_b64 s[48:49], s[4:5] +; GFX9-NEXT: s_swappc_b64 s[30:31], s[16:17] +; GFX9-NEXT: v_cvt_f64_i32_e32 v[2:3], v41 +; GFX9-NEXT: s_getpc_b64 s[16:17] +; GFX9-NEXT: s_add_u32 s16, s16, _Z4exp2d@rel32@lo+4 +; GFX9-NEXT: s_addc_u32 s17, s17, _Z4exp2d@rel32@hi+12 +; GFX9-NEXT: s_mov_b64 s[4:5], s[48:49] +; GFX9-NEXT: s_mov_b64 s[6:7], s[38:39] +; GFX9-NEXT: v_mul_f64 v[0:1], v[0:1], v[2:3] +; GFX9-NEXT: s_mov_b64 s[8:9], s[36:37] +; GFX9-NEXT: s_mov_b64 s[10:11], s[34:35] +; GFX9-NEXT: s_mov_b32 s12, s53 +; GFX9-NEXT: s_mov_b32 s13, s52 +; GFX9-NEXT: s_mov_b32 s14, s51 +; GFX9-NEXT: s_mov_b32 s15, s50 +; GFX9-NEXT: v_mov_b32_e32 v31, v40 +; GFX9-NEXT: s_swappc_b64 s[30:31], s[16:17] +; GFX9-NEXT: v_lshlrev_b32_e32 v2, 31, v41 +; GFX9-NEXT: v_and_b32_e32 v2, v2, v42 +; GFX9-NEXT: buffer_load_dword v42, off, s[0:3], s33 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword 
v41, off, s[0:3], s33 offset:4 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v40, off, s[0:3], s33 offset:8 ; 4-byte Folded Reload +; GFX9-NEXT: v_readlane_b32 s30, v43, 12 +; GFX9-NEXT: v_or_b32_e32 v1, v1, v2 +; GFX9-NEXT: v_readlane_b32 s31, v43, 13 +; GFX9-NEXT: v_readlane_b32 s53, v43, 11 +; GFX9-NEXT: v_readlane_b32 s52, v43, 10 +; GFX9-NEXT: v_readlane_b32 s51, v43, 9 +; GFX9-NEXT: v_readlane_b32 s50, v43, 8 +; GFX9-NEXT: v_readlane_b32 s49, v43, 7 +; GFX9-NEXT: v_readlane_b32 s48, v43, 6 +; GFX9-NEXT: v_readlane_b32 s39, v43, 5 +; GFX9-NEXT: v_readlane_b32 s38, v43, 4 +; GFX9-NEXT: v_readlane_b32 s37, v43, 3 +; GFX9-NEXT: v_readlane_b32 s36, v43, 2 +; GFX9-NEXT: v_readlane_b32 s35, v43, 1 +; GFX9-NEXT: v_readlane_b32 s34, v43, 0 +; GFX9-NEXT: s_mov_b32 s32, s33 +; GFX9-NEXT: v_readlane_b32 s4, v43, 14 +; GFX9-NEXT: s_or_saveexec_b64 s[6:7], -1 +; GFX9-NEXT: buffer_load_dword v43, off, s[0:3], s33 offset:12 ; 4-byte Folded Reload +; GFX9-NEXT: s_mov_b64 exec, s[6:7] +; GFX9-NEXT: s_mov_b32 s33, s4 +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: s_setpc_b64 s[30:31] + %fabs = call fast double @llvm.fabs.f64(double %x) + %log2 = call fast double @_Z4log2d(double %fabs) + %pownI2F = sitofp i32 %y.i to double + %ylogx = fmul fast double %log2, %pownI2F + %exp2 = call fast nofpclass(nan ninf nzero nsub nnorm) double @_Z4exp2d(double %ylogx) + %ytou = zext i32 %y.i to i64 + %yeven = shl i64 %ytou, 63 + %x.i64 = bitcast double %x to i64 + %pow_sign = and i64 %yeven, %x.i64 + %pow_sign.f64 = bitcast i64 %pow_sign to double + %pow_sign1 = call fast double @llvm.copysign.f64(double %exp2, double %pow_sign.f64) + ret double %pow_sign1 +} + +declare hidden double @_Z4exp2d(double) #1 +declare hidden double @_Z4log2d(double) #1 + attributes #0 = { nocallback nofree nosync nounwind speculatable willreturn memory(none) } +attributes #1 = { norecurse nounwind memory(read) } diff --git a/llvm/test/CodeGen/AMDGPU/copysign-to-disjoint-or-combine.ll b/llvm/test/CodeGen/AMDGPU/copysign-to-disjoint-or-combine.ll new file mode 100644 index 0000000000000..afd610f4911c6 --- /dev/null +++ b/llvm/test/CodeGen/AMDGPU/copysign-to-disjoint-or-combine.ll @@ -0,0 +1,198 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6 +; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 < %s | FileCheck -check-prefix=GFX9 %s + +; Negative test, don't know %x is positive +define half @copysign_known_signmask_f16(half %x, i16 %sign) { +; GFX9-LABEL: copysign_known_signmask_f16: +; GFX9: ; %bb.0: +; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT: v_lshlrev_b16_e32 v1, 15, v1 +; GFX9-NEXT: s_movk_i32 s4, 0x7fff +; GFX9-NEXT: v_bfi_b32 v0, s4, v0, v1 +; GFX9-NEXT: s_setpc_b64 s[30:31] + %signmask = shl i16 %sign, 15 + %signmask.bitcast = bitcast i16 %signmask to half + %result = call half @llvm.copysign.f16(half %x, half %signmask.bitcast) + ret half %result +} + +; Negative test, don't know %x is positive +define float @copysign_known_signmask_f32(float %x, i32 %sign) { +; GFX9-LABEL: copysign_known_signmask_f32: +; GFX9: ; %bb.0: +; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT: v_lshlrev_b32_e32 v1, 31, v1 +; GFX9-NEXT: s_brev_b32 s4, -2 +; GFX9-NEXT: v_bfi_b32 v0, s4, v0, v1 +; GFX9-NEXT: s_setpc_b64 s[30:31] + %signmask = shl i32 %sign, 31 + %signmask.bitcast = bitcast i32 %signmask to float + %result = call float @llvm.copysign.f32(float %x, float %signmask.bitcast) + ret float %result +} + +; Negative test, don't know %x is positive 
+define double @copysign_known_signmask_f64(double %x, i64 %sign) { +; GFX9-LABEL: copysign_known_signmask_f64: +; GFX9: ; %bb.0: +; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT: v_lshlrev_b32_e32 v2, 31, v2 +; GFX9-NEXT: s_brev_b32 s4, -2 +; GFX9-NEXT: v_bfi_b32 v1, s4, v1, v2 +; GFX9-NEXT: s_setpc_b64 s[30:31] + %signmask = shl i64 %sign, 63 + %signmask.bitcast = bitcast i64 %signmask to double + %result = call double @llvm.copysign.f64(double %x, double %signmask.bitcast) + ret double %result +} + +; Negative test, don't know %x is positive +define float @copysign_known_signmask_f32_known_not_known_positive_mag_maybe_nan(float nofpclass(ninf nzero nsub nnorm) %sign.bit.known.zero, i32 %sign) { +; GFX9-LABEL: copysign_known_signmask_f32_known_not_known_positive_mag_maybe_nan: +; GFX9: ; %bb.0: +; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT: v_lshlrev_b32_e32 v1, 31, v1 +; GFX9-NEXT: s_brev_b32 s4, -2 +; GFX9-NEXT: v_bfi_b32 v0, s4, v0, v1 +; GFX9-NEXT: s_setpc_b64 s[30:31] + %signmask = shl i32 %sign, 31 + %signmask.bitcast = bitcast i32 %signmask to float + %result = call float @llvm.copysign.f32(float %sign.bit.known.zero, float %signmask.bitcast) + ret float %result +} + +; Negative test, don't know %x is positive +define float @copysign_known_signmask_f32_known_not_known_positive_mag_maybe_negzero(float nofpclass(nan ninf nsub nnorm) %sign.bit.known.zero, i32 %sign) { +; GFX9-LABEL: copysign_known_signmask_f32_known_not_known_positive_mag_maybe_negzero: +; GFX9: ; %bb.0: +; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT: v_lshlrev_b32_e32 v1, 31, v1 +; GFX9-NEXT: s_brev_b32 s4, -2 +; GFX9-NEXT: v_bfi_b32 v0, s4, v0, v1 +; GFX9-NEXT: s_setpc_b64 s[30:31] + %signmask = shl i32 %sign, 31 + %signmask.bitcast = bitcast i32 %signmask to float + %result = call float @llvm.copysign.f32(float %sign.bit.known.zero, float %signmask.bitcast) + ret float %result +} + +define half @copysign_known_signmask_f16_known_positive_mag(half nofpclass(nan ninf nzero nsub nnorm) %sign.bit.known.zero, i16 %sign) { +; GFX9-LABEL: copysign_known_signmask_f16_known_positive_mag: +; GFX9: ; %bb.0: +; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT: v_lshlrev_b16_e32 v1, 15, v1 +; GFX9-NEXT: v_or_b32_e32 v0, v0, v1 +; GFX9-NEXT: s_setpc_b64 s[30:31] + %signmask = shl i16 %sign, 15 + %signmask.bitcast = bitcast i16 %signmask to half + %result = call half @llvm.copysign.f16(half %sign.bit.known.zero, half %signmask.bitcast) + ret half %result +} + +define float @copysign_known_signmask_f32_known_positive_mag(float nofpclass(nan ninf nzero nsub nnorm) %sign.bit.known.zero, i32 %sign) { +; GFX9-LABEL: copysign_known_signmask_f32_known_positive_mag: +; GFX9: ; %bb.0: +; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT: v_lshl_or_b32 v0, v1, 31, v0 +; GFX9-NEXT: s_setpc_b64 s[30:31] + %signmask = shl i32 %sign, 31 + %signmask.bitcast = bitcast i32 %signmask to float + %result = call float @llvm.copysign.f32(float %sign.bit.known.zero, float %signmask.bitcast) + ret float %result +} + +define double @copysign_known_signmask_f64_known_positive_mag(double nofpclass(nan ninf nzero nsub nnorm) %sign.bit.known.zero, i64 %sign) { +; GFX9-LABEL: copysign_known_signmask_f64_known_positive_mag: +; GFX9: ; %bb.0: +; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT: v_lshlrev_b32_e32 v2, 31, v2 +; GFX9-NEXT: v_or_b32_e32 v1, v1, v2 +; GFX9-NEXT: s_setpc_b64 s[30:31] + %signmask = shl i64 %sign, 63 + %signmask.bitcast = bitcast i64 
%signmask to double + %result = call double @llvm.copysign.f64(double %sign.bit.known.zero, double %signmask.bitcast) + ret double %result +} + +; exp always returns a positive result, excluding the unknown nan sign +; bit. +define float @copysign_known_signmask_f32_known_positive_mag__nnan_exp(float %x, i32 %sign) { +; GFX9-LABEL: copysign_known_signmask_f32_known_positive_mag__nnan_exp: +; GFX9: ; %bb.0: +; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT: s_mov_b32 s4, 0xc2aeac50 +; GFX9-NEXT: v_add_f32_e32 v2, 0x42800000, v0 +; GFX9-NEXT: v_cmp_gt_f32_e32 vcc, s4, v0 +; GFX9-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc +; GFX9-NEXT: v_mul_f32_e32 v0, 0x3fb8aa3b, v0 +; GFX9-NEXT: v_exp_f32_e32 v0, v0 +; GFX9-NEXT: v_mul_f32_e32 v2, 0x114b4ea4, v0 +; GFX9-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc +; GFX9-NEXT: v_lshl_or_b32 v0, v1, 31, v0 +; GFX9-NEXT: s_setpc_b64 s[30:31] + %signbit.known.zero = call nnan afn float @llvm.exp.f32(float %x) + %signmask = shl i32 %sign, 31 + %signmask.bitcast = bitcast i32 %signmask to float + %result = call float @llvm.copysign.f32(float %signbit.known.zero, float %signmask.bitcast) + ret float %result +} + +define float @copysign_known_signmask_f32_known_positive_mag__nnan_exp2(float %x, i32 %sign) { +; GFX9-LABEL: copysign_known_signmask_f32_known_positive_mag__nnan_exp2: +; GFX9: ; %bb.0: +; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT: s_mov_b32 s4, 0xc2fc0000 +; GFX9-NEXT: v_cmp_gt_f32_e32 vcc, s4, v0 +; GFX9-NEXT: v_mov_b32_e32 v3, 0x42800000 +; GFX9-NEXT: v_cndmask_b32_e32 v3, 0, v3, vcc +; GFX9-NEXT: v_add_f32_e32 v0, v0, v3 +; GFX9-NEXT: v_exp_f32_e32 v0, v0 +; GFX9-NEXT: v_not_b32_e32 v2, 63 +; GFX9-NEXT: v_cndmask_b32_e32 v2, 0, v2, vcc +; GFX9-NEXT: v_ldexp_f32 v0, v0, v2 +; GFX9-NEXT: v_lshl_or_b32 v0, v1, 31, v0 +; GFX9-NEXT: s_setpc_b64 s[30:31] + %signbit.known.zero = call nnan afn float @llvm.exp2.f32(float %x) + %signmask = shl i32 %sign, 31 + %signmask.bitcast = bitcast i32 %signmask to float + %result = call float @llvm.copysign.f32(float %signbit.known.zero, float %signmask.bitcast) + ret float %result +} + +define float @copysign_known_signmask_f32_known_positive_mag__nnan_exp10(float %x, i32 %sign) { +; GFX9-LABEL: copysign_known_signmask_f32_known_positive_mag__nnan_exp10: +; GFX9: ; %bb.0: +; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT: s_mov_b32 s4, 0xc2fc0000 +; GFX9-NEXT: v_cmp_gt_f32_e32 vcc, s4, v0 +; GFX9-NEXT: v_mov_b32_e32 v3, 0x42800000 +; GFX9-NEXT: v_cndmask_b32_e32 v3, 0, v3, vcc +; GFX9-NEXT: v_add_f32_e32 v0, v0, v3 +; GFX9-NEXT: v_exp_f32_e32 v0, v0 +; GFX9-NEXT: v_not_b32_e32 v2, 63 +; GFX9-NEXT: v_cndmask_b32_e32 v2, 0, v2, vcc +; GFX9-NEXT: v_ldexp_f32 v0, v0, v2 +; GFX9-NEXT: v_lshl_or_b32 v0, v1, 31, v0 +; GFX9-NEXT: s_setpc_b64 s[30:31] + %signbit.known.zero = call nnan afn float @llvm.exp2.f32(float %x) + %signmask = shl i32 %sign, 31 + %signmask.bitcast = bitcast i32 %signmask to float + %result = call float @llvm.copysign.f32(float %signbit.known.zero, float %signmask.bitcast) + ret float %result +} + +define float @copysign_known_signmask_f32_known_positive_mag_through_fence(float nofpclass(nan ninf nzero nsub nnorm) %sign.bit.known.zero, i32 %sign) { +; GFX9-LABEL: copysign_known_signmask_f32_known_positive_mag_through_fence: +; GFX9: ; %bb.0: +; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT: v_lshlrev_b32_e32 v1, 31, v1 +; GFX9-NEXT: ;ARITH_FENCE +; GFX9-NEXT: s_brev_b32 s4, -2 +; GFX9-NEXT: v_bfi_b32 v0, s4, v0, v1 +; GFX9-NEXT: 
s_setpc_b64 s[30:31] + %signmask = shl i32 %sign, 31 + %signmask.bitcast = bitcast i32 %signmask to float + %fence = call float @llvm.arithmetic.fence.f32(float %sign.bit.known.zero) + %result = call float @llvm.copysign.f32(float %fence, float %signmask.bitcast) + ret float %result +} diff --git a/llvm/test/CodeGen/AMDGPU/divergence-driven-buildvector.ll b/llvm/test/CodeGen/AMDGPU/divergence-driven-buildvector.ll index ab96dcf1f6069..08545b901581c 100644 --- a/llvm/test/CodeGen/AMDGPU/divergence-driven-buildvector.ll +++ b/llvm/test/CodeGen/AMDGPU/divergence-driven-buildvector.ll @@ -390,16 +390,15 @@ define amdgpu_kernel void @uniform_vec_i16_LH(ptr addrspace(1) %out, i16 %a, i32 ; GCN-LABEL: uniform_vec_i16_LH: ; GCN: ; %bb.0: ; GCN-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9 -; GCN-NEXT: s_mov_b32 s7, 0xf000 ; GCN-NEXT: s_waitcnt lgkmcnt(0) -; GCN-NEXT: s_and_b32 s3, s3, 0xffff0000 -; GCN-NEXT: s_and_b32 s2, s2, 0xffff -; GCN-NEXT: s_or_b32 s2, s2, s3 -; GCN-NEXT: s_mov_b32 s6, -1 -; GCN-NEXT: s_mov_b32 s4, s0 -; GCN-NEXT: s_mov_b32 s5, s1 -; GCN-NEXT: v_mov_b32_e32 v0, s2 -; GCN-NEXT: buffer_store_dword v0, off, s[4:7], 0 +; GCN-NEXT: s_mov_b64 s[4:5], s[2:3] +; GCN-NEXT: s_mov_b32 s3, 0xf000 +; GCN-NEXT: s_and_b32 s2, s5, 0xffff0000 +; GCN-NEXT: s_and_b32 s4, s4, 0xffff +; GCN-NEXT: s_or_b32 s4, s4, s2 +; GCN-NEXT: s_mov_b32 s2, -1 +; GCN-NEXT: v_mov_b32_e32 v0, s4 +; GCN-NEXT: buffer_store_dword v0, off, s[0:3], 0 ; GCN-NEXT: s_endpgm ; ; GFX9-LABEL: uniform_vec_i16_LH: diff --git a/llvm/test/CodeGen/AMDGPU/divergence-driven-sext-inreg.ll b/llvm/test/CodeGen/AMDGPU/divergence-driven-sext-inreg.ll index 4c3fd40d7a25a..d8f9bc1a0e054 100644 --- a/llvm/test/CodeGen/AMDGPU/divergence-driven-sext-inreg.ll +++ b/llvm/test/CodeGen/AMDGPU/divergence-driven-sext-inreg.ll @@ -5,15 +5,14 @@ define amdgpu_kernel void @uniform_sext_in_reg_i8_to_i32(ptr addrspace(1) %out, ; GCN-LABEL: uniform_sext_in_reg_i8_to_i32: ; GCN: ; %bb.0: ; GCN-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9 -; GCN-NEXT: s_mov_b32 s7, 0xf000 ; GCN-NEXT: s_waitcnt lgkmcnt(0) -; GCN-NEXT: s_add_i32 s2, s2, s3 -; GCN-NEXT: s_sext_i32_i8 s2, s2 -; GCN-NEXT: s_mov_b32 s6, -1 -; GCN-NEXT: s_mov_b32 s4, s0 -; GCN-NEXT: s_mov_b32 s5, s1 -; GCN-NEXT: v_mov_b32_e32 v0, s2 -; GCN-NEXT: buffer_store_dword v0, off, s[4:7], 0 +; GCN-NEXT: s_mov_b64 s[4:5], s[2:3] +; GCN-NEXT: s_mov_b32 s3, 0xf000 +; GCN-NEXT: s_add_i32 s2, s4, s5 +; GCN-NEXT: s_sext_i32_i8 s4, s2 +; GCN-NEXT: s_mov_b32 s2, -1 +; GCN-NEXT: v_mov_b32_e32 v0, s4 +; GCN-NEXT: buffer_store_dword v0, off, s[0:3], 0 ; GCN-NEXT: s_endpgm %c = add i32 %a, %b ; add to prevent folding into extload %shl = shl i32 %c, 24 @@ -26,15 +25,14 @@ define amdgpu_kernel void @divergent_sext_in_reg_i8_to_i32(ptr addrspace(1) %out ; GCN-LABEL: divergent_sext_in_reg_i8_to_i32: ; GCN: ; %bb.0: ; GCN-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9 -; GCN-NEXT: s_mov_b32 s7, 0xf000 -; GCN-NEXT: s_mov_b32 s6, -1 ; GCN-NEXT: s_waitcnt lgkmcnt(0) -; GCN-NEXT: s_mov_b32 s4, s0 -; GCN-NEXT: s_mov_b32 s5, s1 -; GCN-NEXT: s_add_i32 s0, s2, s3 -; GCN-NEXT: v_add_i32_e32 v0, vcc, s0, v0 +; GCN-NEXT: s_mov_b64 s[4:5], s[2:3] +; GCN-NEXT: s_mov_b32 s3, 0xf000 +; GCN-NEXT: s_mov_b32 s2, -1 +; GCN-NEXT: s_add_i32 s4, s4, s5 +; GCN-NEXT: v_add_i32_e32 v0, vcc, s4, v0 ; GCN-NEXT: v_bfe_i32 v0, v0, 0, 8 -; GCN-NEXT: buffer_store_dword v0, off, s[4:7], 0 +; GCN-NEXT: buffer_store_dword v0, off, s[0:3], 0 ; GCN-NEXT: s_endpgm %tid = call i32 @llvm.amdgcn.workitem.id.x() %c = add i32 %a, %b ; add to prevent folding into extload @@ 
-49,15 +47,14 @@ define amdgpu_kernel void @uniform_sext_in_reg_i16_to_i32(ptr addrspace(1) %out, ; GCN-LABEL: uniform_sext_in_reg_i16_to_i32: ; GCN: ; %bb.0: ; GCN-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9 -; GCN-NEXT: s_mov_b32 s7, 0xf000 ; GCN-NEXT: s_waitcnt lgkmcnt(0) -; GCN-NEXT: s_add_i32 s2, s2, s3 -; GCN-NEXT: s_sext_i32_i16 s2, s2 -; GCN-NEXT: s_mov_b32 s6, -1 -; GCN-NEXT: s_mov_b32 s4, s0 -; GCN-NEXT: s_mov_b32 s5, s1 -; GCN-NEXT: v_mov_b32_e32 v0, s2 -; GCN-NEXT: buffer_store_dword v0, off, s[4:7], 0 +; GCN-NEXT: s_mov_b64 s[4:5], s[2:3] +; GCN-NEXT: s_mov_b32 s3, 0xf000 +; GCN-NEXT: s_add_i32 s2, s4, s5 +; GCN-NEXT: s_sext_i32_i16 s4, s2 +; GCN-NEXT: s_mov_b32 s2, -1 +; GCN-NEXT: v_mov_b32_e32 v0, s4 +; GCN-NEXT: buffer_store_dword v0, off, s[0:3], 0 ; GCN-NEXT: s_endpgm %c = add i32 %a, %b ; add to prevent folding into extload %shl = shl i32 %c, 16 @@ -70,15 +67,14 @@ define amdgpu_kernel void @divergent_sext_in_reg_i16_to_i32(ptr addrspace(1) %ou ; GCN-LABEL: divergent_sext_in_reg_i16_to_i32: ; GCN: ; %bb.0: ; GCN-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9 -; GCN-NEXT: s_mov_b32 s7, 0xf000 -; GCN-NEXT: s_mov_b32 s6, -1 ; GCN-NEXT: s_waitcnt lgkmcnt(0) -; GCN-NEXT: s_mov_b32 s4, s0 -; GCN-NEXT: s_mov_b32 s5, s1 -; GCN-NEXT: s_add_i32 s0, s2, s3 -; GCN-NEXT: v_add_i32_e32 v0, vcc, s0, v0 +; GCN-NEXT: s_mov_b64 s[4:5], s[2:3] +; GCN-NEXT: s_mov_b32 s3, 0xf000 +; GCN-NEXT: s_mov_b32 s2, -1 +; GCN-NEXT: s_add_i32 s4, s4, s5 +; GCN-NEXT: v_add_i32_e32 v0, vcc, s4, v0 ; GCN-NEXT: v_bfe_i32 v0, v0, 0, 16 -; GCN-NEXT: buffer_store_dword v0, off, s[4:7], 0 +; GCN-NEXT: buffer_store_dword v0, off, s[0:3], 0 ; GCN-NEXT: s_endpgm %tid = call i32 @llvm.amdgcn.workitem.id.x() %c = add i32 %a, %b ; add to prevent folding into extload diff --git a/llvm/test/CodeGen/AMDGPU/fabs.ll b/llvm/test/CodeGen/AMDGPU/fabs.ll index 6bcb086944c91..97e23fcdb2263 100644 --- a/llvm/test/CodeGen/AMDGPU/fabs.ll +++ b/llvm/test/CodeGen/AMDGPU/fabs.ll @@ -99,16 +99,15 @@ define amdgpu_kernel void @fabs_v2f32(ptr addrspace(1) %out, <2 x float> %in) { ; SI-LABEL: fabs_v2f32: ; SI: ; %bb.0: ; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9 -; SI-NEXT: s_mov_b32 s7, 0xf000 -; SI-NEXT: s_mov_b32 s6, -1 ; SI-NEXT: s_waitcnt lgkmcnt(0) -; SI-NEXT: s_mov_b32 s4, s0 -; SI-NEXT: s_mov_b32 s5, s1 -; SI-NEXT: s_and_b32 s0, s3, 0x7fffffff -; SI-NEXT: s_and_b32 s1, s2, 0x7fffffff -; SI-NEXT: v_mov_b32_e32 v0, s1 -; SI-NEXT: v_mov_b32_e32 v1, s0 -; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0 +; SI-NEXT: s_mov_b64 s[4:5], s[2:3] +; SI-NEXT: s_mov_b32 s3, 0xf000 +; SI-NEXT: s_mov_b32 s2, -1 +; SI-NEXT: s_bitset0_b32 s5, 31 +; SI-NEXT: s_bitset0_b32 s4, 31 +; SI-NEXT: v_mov_b32_e32 v0, s4 +; SI-NEXT: v_mov_b32_e32 v1, s5 +; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0 ; SI-NEXT: s_endpgm ; ; VI-LABEL: fabs_v2f32: diff --git a/llvm/test/CodeGen/AMDGPU/fdiv.ll b/llvm/test/CodeGen/AMDGPU/fdiv.ll index b826e6c469d8e..4d448e64f0921 100644 --- a/llvm/test/CodeGen/AMDGPU/fdiv.ll +++ b/llvm/test/CodeGen/AMDGPU/fdiv.ll @@ -333,18 +333,17 @@ define amdgpu_kernel void @s_fdiv_25ulp_f32(ptr addrspace(1) %out, float %a, flo ; GFX67-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9 ; GFX67-NEXT: v_mov_b32_e32 v0, 0x6f800000 ; GFX67-NEXT: v_mov_b32_e32 v1, 0x2f800000 -; GFX67-NEXT: s_mov_b32 s7, 0xf000 -; GFX67-NEXT: s_mov_b32 s6, -1 ; GFX67-NEXT: s_waitcnt lgkmcnt(0) -; GFX67-NEXT: v_cmp_gt_f32_e64 vcc, |s3|, v0 +; GFX67-NEXT: s_mov_b64 s[4:5], s[2:3] +; GFX67-NEXT: v_cmp_gt_f32_e64 vcc, |s5|, v0 ; GFX67-NEXT: v_cndmask_b32_e32 v0, 1.0, v1, 
vcc -; GFX67-NEXT: v_mul_f32_e32 v1, s3, v0 +; GFX67-NEXT: v_mul_f32_e32 v1, s5, v0 ; GFX67-NEXT: v_rcp_f32_e32 v1, v1 -; GFX67-NEXT: s_mov_b32 s4, s0 -; GFX67-NEXT: s_mov_b32 s5, s1 -; GFX67-NEXT: v_mul_f32_e32 v1, s2, v1 +; GFX67-NEXT: s_mov_b32 s3, 0xf000 +; GFX67-NEXT: s_mov_b32 s2, -1 +; GFX67-NEXT: v_mul_f32_e32 v1, s4, v1 ; GFX67-NEXT: v_mul_f32_e32 v0, v0, v1 -; GFX67-NEXT: buffer_store_dword v0, off, s[4:7], 0 +; GFX67-NEXT: buffer_store_dword v0, off, s[0:3], 0 ; GFX67-NEXT: s_endpgm ; ; GFX8-LABEL: s_fdiv_25ulp_f32: @@ -441,20 +440,19 @@ define amdgpu_kernel void @s_fdiv_25ulp_ieee_f32(ptr addrspace(1) %out, float %a ; GFX7-LABEL: s_fdiv_25ulp_ieee_f32: ; GFX7: ; %bb.0: ; %entry ; GFX7-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9 -; GFX7-NEXT: s_mov_b32 s7, 0xf000 -; GFX7-NEXT: s_mov_b32 s6, -1 ; GFX7-NEXT: s_waitcnt lgkmcnt(0) -; GFX7-NEXT: v_frexp_mant_f32_e32 v0, s3 +; GFX7-NEXT: s_mov_b64 s[4:5], s[2:3] +; GFX7-NEXT: v_frexp_mant_f32_e32 v0, s5 ; GFX7-NEXT: v_rcp_f32_e32 v0, v0 -; GFX7-NEXT: v_frexp_exp_i32_f32_e32 v1, s3 -; GFX7-NEXT: v_frexp_exp_i32_f32_e32 v2, s2 -; GFX7-NEXT: v_frexp_mant_f32_e32 v3, s2 +; GFX7-NEXT: v_frexp_exp_i32_f32_e32 v1, s5 +; GFX7-NEXT: v_frexp_exp_i32_f32_e32 v2, s4 +; GFX7-NEXT: v_frexp_mant_f32_e32 v3, s4 ; GFX7-NEXT: v_mul_f32_e32 v0, v3, v0 ; GFX7-NEXT: v_sub_i32_e32 v1, vcc, v2, v1 -; GFX7-NEXT: s_mov_b32 s4, s0 -; GFX7-NEXT: s_mov_b32 s5, s1 +; GFX7-NEXT: s_mov_b32 s3, 0xf000 +; GFX7-NEXT: s_mov_b32 s2, -1 ; GFX7-NEXT: v_ldexp_f32_e32 v0, v0, v1 -; GFX7-NEXT: buffer_store_dword v0, off, s[4:7], 0 +; GFX7-NEXT: buffer_store_dword v0, off, s[0:3], 0 ; GFX7-NEXT: s_endpgm ; ; GFX8-LABEL: s_fdiv_25ulp_ieee_f32: @@ -528,14 +526,13 @@ define amdgpu_kernel void @s_fdiv_fast_ieee_f32(ptr addrspace(1) %out, float %a, ; GFX67-LABEL: s_fdiv_fast_ieee_f32: ; GFX67: ; %bb.0: ; %entry ; GFX67-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9 -; GFX67-NEXT: s_mov_b32 s7, 0xf000 -; GFX67-NEXT: s_mov_b32 s6, -1 ; GFX67-NEXT: s_waitcnt lgkmcnt(0) -; GFX67-NEXT: v_rcp_f32_e32 v0, s3 -; GFX67-NEXT: s_mov_b32 s4, s0 -; GFX67-NEXT: s_mov_b32 s5, s1 -; GFX67-NEXT: v_mul_f32_e32 v0, s2, v0 -; GFX67-NEXT: buffer_store_dword v0, off, s[4:7], 0 +; GFX67-NEXT: s_mov_b64 s[4:5], s[2:3] +; GFX67-NEXT: v_rcp_f32_e32 v0, s5 +; GFX67-NEXT: s_mov_b32 s3, 0xf000 +; GFX67-NEXT: s_mov_b32 s2, -1 +; GFX67-NEXT: v_mul_f32_e32 v0, s4, v0 +; GFX67-NEXT: buffer_store_dword v0, off, s[0:3], 0 ; GFX67-NEXT: s_endpgm ; ; GFX8-LABEL: s_fdiv_fast_ieee_f32: @@ -590,14 +587,13 @@ define amdgpu_kernel void @s_fdiv_f32_fast_math(ptr addrspace(1) %out, float %a, ; GFX67-LABEL: s_fdiv_f32_fast_math: ; GFX67: ; %bb.0: ; %entry ; GFX67-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9 -; GFX67-NEXT: s_mov_b32 s7, 0xf000 -; GFX67-NEXT: s_mov_b32 s6, -1 ; GFX67-NEXT: s_waitcnt lgkmcnt(0) -; GFX67-NEXT: v_rcp_f32_e32 v0, s3 -; GFX67-NEXT: s_mov_b32 s4, s0 -; GFX67-NEXT: s_mov_b32 s5, s1 -; GFX67-NEXT: v_mul_f32_e32 v0, s2, v0 -; GFX67-NEXT: buffer_store_dword v0, off, s[4:7], 0 +; GFX67-NEXT: s_mov_b64 s[4:5], s[2:3] +; GFX67-NEXT: v_rcp_f32_e32 v0, s5 +; GFX67-NEXT: s_mov_b32 s3, 0xf000 +; GFX67-NEXT: s_mov_b32 s2, -1 +; GFX67-NEXT: v_mul_f32_e32 v0, s4, v0 +; GFX67-NEXT: buffer_store_dword v0, off, s[0:3], 0 ; GFX67-NEXT: s_endpgm ; ; GFX8-LABEL: s_fdiv_f32_fast_math: @@ -652,14 +648,13 @@ define amdgpu_kernel void @s_fdiv_ulp25_f32_fast_math(ptr addrspace(1) %out, flo ; GFX67-LABEL: s_fdiv_ulp25_f32_fast_math: ; GFX67: ; %bb.0: ; %entry ; GFX67-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9 -; GFX67-NEXT: s_mov_b32 s7, 
0xf000 -; GFX67-NEXT: s_mov_b32 s6, -1 ; GFX67-NEXT: s_waitcnt lgkmcnt(0) -; GFX67-NEXT: v_rcp_f32_e32 v0, s3 -; GFX67-NEXT: s_mov_b32 s4, s0 -; GFX67-NEXT: s_mov_b32 s5, s1 -; GFX67-NEXT: v_mul_f32_e32 v0, s2, v0 -; GFX67-NEXT: buffer_store_dword v0, off, s[4:7], 0 +; GFX67-NEXT: s_mov_b64 s[4:5], s[2:3] +; GFX67-NEXT: v_rcp_f32_e32 v0, s5 +; GFX67-NEXT: s_mov_b32 s3, 0xf000 +; GFX67-NEXT: s_mov_b32 s2, -1 +; GFX67-NEXT: v_mul_f32_e32 v0, s4, v0 +; GFX67-NEXT: buffer_store_dword v0, off, s[0:3], 0 ; GFX67-NEXT: s_endpgm ; ; GFX8-LABEL: s_fdiv_ulp25_f32_fast_math: @@ -877,14 +872,13 @@ define amdgpu_kernel void @s_fdiv_f32_arcp_ninf(ptr addrspace(1) %out, float %a, ; GFX67-LABEL: s_fdiv_f32_arcp_ninf: ; GFX67: ; %bb.0: ; %entry ; GFX67-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9 -; GFX67-NEXT: s_mov_b32 s7, 0xf000 -; GFX67-NEXT: s_mov_b32 s6, -1 ; GFX67-NEXT: s_waitcnt lgkmcnt(0) -; GFX67-NEXT: v_rcp_f32_e32 v0, s3 -; GFX67-NEXT: s_mov_b32 s4, s0 -; GFX67-NEXT: s_mov_b32 s5, s1 -; GFX67-NEXT: v_mul_f32_e32 v0, s2, v0 -; GFX67-NEXT: buffer_store_dword v0, off, s[4:7], 0 +; GFX67-NEXT: s_mov_b64 s[4:5], s[2:3] +; GFX67-NEXT: v_rcp_f32_e32 v0, s5 +; GFX67-NEXT: s_mov_b32 s3, 0xf000 +; GFX67-NEXT: s_mov_b32 s2, -1 +; GFX67-NEXT: v_mul_f32_e32 v0, s4, v0 +; GFX67-NEXT: buffer_store_dword v0, off, s[0:3], 0 ; GFX67-NEXT: s_endpgm ; ; GFX8-LABEL: s_fdiv_f32_arcp_ninf: diff --git a/llvm/test/CodeGen/AMDGPU/fmin_legacy.ll b/llvm/test/CodeGen/AMDGPU/fmin_legacy.ll index defcffa641e64..39eefa1879870 100644 --- a/llvm/test/CodeGen/AMDGPU/fmin_legacy.ll +++ b/llvm/test/CodeGen/AMDGPU/fmin_legacy.ll @@ -75,9 +75,12 @@ define amdgpu_kernel void @s_test_fmin_legacy_ule_f32_fast(ptr addrspace(1) %out ; GCN-LABEL: {{^}}s_test_fmin_legacy_ule_f32_nnan_src: ; GCN: s_load_dwordx4 s[[[#LOAD:]]:{{[0-9]+}}], s{{\[[0-9]+:[0-9]+\]}}, {{0x9|0x24}} -; GCN-DAG: v_add_f32_e64 [[ADD_A:v[0-9]+]], s[[#LOAD + 2]], 1.0 -; GCN-DAG: v_add_f32_e64 [[ADD_B:v[0-9]+]], s[[#LOAD + 3]], 2.0 +; SI: s_mov_b64 s[[[#COPY:]]:{{[0-9]+}}], s{{\[}}[[#LOAD + 2]]:[[#LOAD + 3]]{{\]}} +; SI-DAG: v_add_f32_e64 [[ADD_A:v[0-9]+]], s[[#COPY]], 1.0 +; SI-DAG: v_add_f32_e64 [[ADD_B:v[0-9]+]], s[[#COPY + 1]], 2.0 +; VI-DAG: v_add_f32_e64 [[ADD_A:v[0-9]+]], s[[#LOAD + 2]], 1.0 +; VI-DAG: v_add_f32_e64 [[ADD_B:v[0-9]+]], s[[#LOAD + 3]], 2.0 ; SI: v_min_legacy_f32_e32 {{v[0-9]+}}, [[ADD_B]], [[ADD_A]] ; VI: v_cmp_ngt_f32_e32 vcc, [[ADD_A]], [[ADD_B]] @@ -96,8 +99,12 @@ define amdgpu_kernel void @s_test_fmin_legacy_ule_f32_nnan_src(ptr addrspace(1) ; GCN-LABEL: {{^}}s_test_fmin_legacy_ule_f32_nnan_src_fast: ; GCN: s_load_dwordx4 s[[[#LOAD:]]:{{[0-9]+}}], s{{\[[0-9]+:[0-9]+\]}}, {{0x9|0x24}} -; GCN-DAG: v_add_f32_e64 [[ADD_A:v[0-9]+]], s[[#LOAD + 2]], 1.0 -; GCN-DAG: v_add_f32_e64 [[ADD_B:v[0-9]+]], s[[#LOAD + 3]], 2.0 +; SI: s_mov_b64 s[[[#COPY:]]:{{[0-9]+}}], s{{\[}}[[#LOAD + 2]]:[[#LOAD + 3]]{{\]}} +; SI-DAG: v_add_f32_e64 [[ADD_A:v[0-9]+]], s[[#COPY]], 1.0 +; SI-DAG: v_add_f32_e64 [[ADD_B:v[0-9]+]], s[[#COPY + 1]], 2.0 + +; VI-DAG: v_add_f32_e64 [[ADD_A:v[0-9]+]], s[[#LOAD + 2]], 1.0 +; VI-DAG: v_add_f32_e64 [[ADD_B:v[0-9]+]], s[[#LOAD + 3]], 2.0 ; GCN: v_min_f32_e32 {{v[0-9]+}}, [[ADD_A]], [[ADD_B]] define amdgpu_kernel void @s_test_fmin_legacy_ule_f32_nnan_src_fast(ptr addrspace(1) %out, float %a, float %b) #0 { diff --git a/llvm/test/CodeGen/AMDGPU/fnearbyint.ll b/llvm/test/CodeGen/AMDGPU/fnearbyint.ll index a025c36f620c7..6c2ab5fb15a20 100644 --- a/llvm/test/CodeGen/AMDGPU/fnearbyint.ll +++ b/llvm/test/CodeGen/AMDGPU/fnearbyint.ll @@ -121,14 +121,13 
@@ define amdgpu_kernel void @fnearbyint_v2f32(ptr addrspace(1) %out, <2 x float> % ; SICI-LABEL: fnearbyint_v2f32: ; SICI: ; %bb.0: ; %entry ; SICI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9 -; SICI-NEXT: s_mov_b32 s7, 0xf000 -; SICI-NEXT: s_mov_b32 s6, -1 ; SICI-NEXT: s_waitcnt lgkmcnt(0) -; SICI-NEXT: s_mov_b32 s4, s0 -; SICI-NEXT: s_mov_b32 s5, s1 -; SICI-NEXT: v_rndne_f32_e32 v1, s3 -; SICI-NEXT: v_rndne_f32_e32 v0, s2 -; SICI-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0 +; SICI-NEXT: s_mov_b64 s[4:5], s[2:3] +; SICI-NEXT: s_mov_b32 s3, 0xf000 +; SICI-NEXT: s_mov_b32 s2, -1 +; SICI-NEXT: v_rndne_f32_e32 v1, s5 +; SICI-NEXT: v_rndne_f32_e32 v0, s4 +; SICI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0 ; SICI-NEXT: s_endpgm ; ; VI-LABEL: fnearbyint_v2f32: diff --git a/llvm/test/CodeGen/AMDGPU/fneg-fabs.ll b/llvm/test/CodeGen/AMDGPU/fneg-fabs.ll index 1fa9bfa3cfa3f..214ccedd75170 100644 --- a/llvm/test/CodeGen/AMDGPU/fneg-fabs.ll +++ b/llvm/test/CodeGen/AMDGPU/fneg-fabs.ll @@ -199,16 +199,15 @@ define amdgpu_kernel void @fneg_fabsf_v2f32(ptr addrspace(1) %out, <2 x float> % ; SI-LABEL: fneg_fabsf_v2f32: ; SI: ; %bb.0: ; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9 -; SI-NEXT: s_mov_b32 s7, 0xf000 ; SI-NEXT: s_waitcnt lgkmcnt(0) -; SI-NEXT: s_bitset1_b32 s3, 31 -; SI-NEXT: s_bitset1_b32 s2, 31 -; SI-NEXT: s_mov_b32 s6, -1 -; SI-NEXT: s_mov_b32 s4, s0 -; SI-NEXT: s_mov_b32 s5, s1 -; SI-NEXT: v_mov_b32_e32 v0, s2 -; SI-NEXT: v_mov_b32_e32 v1, s3 -; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0 +; SI-NEXT: s_mov_b64 s[4:5], s[2:3] +; SI-NEXT: s_mov_b32 s3, 0xf000 +; SI-NEXT: s_bitset1_b32 s5, 31 +; SI-NEXT: s_bitset1_b32 s4, 31 +; SI-NEXT: s_mov_b32 s2, -1 +; SI-NEXT: v_mov_b32_e32 v0, s4 +; SI-NEXT: v_mov_b32_e32 v1, s5 +; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0 ; SI-NEXT: s_endpgm ; ; VI-LABEL: fneg_fabsf_v2f32: diff --git a/llvm/test/CodeGen/AMDGPU/fneg.ll b/llvm/test/CodeGen/AMDGPU/fneg.ll index c3f4ebe30152b..02235151a83e1 100644 --- a/llvm/test/CodeGen/AMDGPU/fneg.ll +++ b/llvm/test/CodeGen/AMDGPU/fneg.ll @@ -52,16 +52,15 @@ define amdgpu_kernel void @s_fneg_v2f32(ptr addrspace(1) nocapture %out, <2 x fl ; SI-LABEL: s_fneg_v2f32: ; SI: ; %bb.0: ; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9 -; SI-NEXT: s_mov_b32 s7, 0xf000 -; SI-NEXT: s_mov_b32 s6, -1 ; SI-NEXT: s_waitcnt lgkmcnt(0) -; SI-NEXT: s_mov_b32 s4, s0 -; SI-NEXT: s_mov_b32 s5, s1 -; SI-NEXT: s_xor_b32 s0, s3, 0x80000000 -; SI-NEXT: s_xor_b32 s1, s2, 0x80000000 -; SI-NEXT: v_mov_b32_e32 v0, s1 -; SI-NEXT: v_mov_b32_e32 v1, s0 -; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0 +; SI-NEXT: s_mov_b64 s[4:5], s[2:3] +; SI-NEXT: s_xor_b32 s5, s5, 0x80000000 +; SI-NEXT: s_xor_b32 s4, s4, 0x80000000 +; SI-NEXT: s_mov_b32 s3, 0xf000 +; SI-NEXT: s_mov_b32 s2, -1 +; SI-NEXT: v_mov_b32_e32 v0, s4 +; SI-NEXT: v_mov_b32_e32 v1, s5 +; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0 ; SI-NEXT: s_endpgm ; ; VI-LABEL: s_fneg_v2f32: diff --git a/llvm/test/CodeGen/AMDGPU/fp_to_sint.ll b/llvm/test/CodeGen/AMDGPU/fp_to_sint.ll index 7ab8b30681eb1..0c5ed00b58d90 100644 --- a/llvm/test/CodeGen/AMDGPU/fp_to_sint.ll +++ b/llvm/test/CodeGen/AMDGPU/fp_to_sint.ll @@ -88,27 +88,24 @@ define amdgpu_kernel void @fp_to_sint_v2i32(ptr addrspace(1) %out, <2 x float> % ; SI-LABEL: fp_to_sint_v2i32: ; SI: ; %bb.0: ; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9 -; SI-NEXT: s_mov_b32 s7, 0xf000 -; SI-NEXT: s_mov_b32 s6, -1 ; SI-NEXT: s_waitcnt lgkmcnt(0) -; SI-NEXT: s_mov_b32 s4, s0 -; SI-NEXT: s_mov_b32 s5, s1 -; SI-NEXT: 
v_cvt_i32_f32_e32 v1, s3 -; SI-NEXT: v_cvt_i32_f32_e32 v0, s2 -; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0 +; SI-NEXT: s_mov_b64 s[4:5], s[2:3] +; SI-NEXT: s_mov_b32 s3, 0xf000 +; SI-NEXT: s_mov_b32 s2, -1 +; SI-NEXT: v_cvt_i32_f32_e32 v1, s5 +; SI-NEXT: v_cvt_i32_f32_e32 v0, s4 +; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0 ; SI-NEXT: s_endpgm ; ; VI-LABEL: fp_to_sint_v2i32: ; VI: ; %bb.0: ; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24 -; VI-NEXT: s_mov_b32 s7, 0xf000 -; VI-NEXT: s_mov_b32 s6, -1 ; VI-NEXT: s_waitcnt lgkmcnt(0) ; VI-NEXT: v_cvt_i32_f32_e32 v1, s3 ; VI-NEXT: v_cvt_i32_f32_e32 v0, s2 -; VI-NEXT: s_mov_b32 s4, s0 -; VI-NEXT: s_mov_b32 s5, s1 -; VI-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0 +; VI-NEXT: s_mov_b32 s3, 0xf000 +; VI-NEXT: s_mov_b32 s2, -1 +; VI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0 ; VI-NEXT: s_endpgm ; ; EG-LABEL: fp_to_sint_v2i32: @@ -294,26 +291,25 @@ entry: define amdgpu_kernel void @fp_to_sint_v2i64(ptr addrspace(1) %out, <2 x float> %x) { ; SI-LABEL: fp_to_sint_v2i64: ; SI: ; %bb.0: -; SI-NEXT: s_load_dwordx4 s[4:7], s[4:5], 0x9 +; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9 +; SI-NEXT: s_mov_b32 s6, 0x2f800000 +; SI-NEXT: s_mov_b32 s7, 0xcf800000 +; SI-NEXT: s_waitcnt lgkmcnt(0) +; SI-NEXT: s_mov_b64 s[4:5], s[2:3] ; SI-NEXT: s_mov_b32 s3, 0xf000 ; SI-NEXT: s_mov_b32 s2, -1 -; SI-NEXT: s_mov_b32 s8, 0x2f800000 -; SI-NEXT: s_mov_b32 s9, 0xcf800000 -; SI-NEXT: s_waitcnt lgkmcnt(0) -; SI-NEXT: s_mov_b32 s0, s4 -; SI-NEXT: s_mov_b32 s1, s5 -; SI-NEXT: v_trunc_f32_e32 v0, s7 -; SI-NEXT: v_trunc_f32_e32 v1, s6 -; SI-NEXT: v_mul_f32_e64 v2, |v0|, s8 +; SI-NEXT: v_trunc_f32_e32 v0, s5 +; SI-NEXT: v_trunc_f32_e32 v1, s4 +; SI-NEXT: v_mul_f32_e64 v2, |v0|, s6 ; SI-NEXT: v_ashrrev_i32_e32 v3, 31, v0 -; SI-NEXT: v_mul_f32_e64 v4, |v1|, s8 +; SI-NEXT: v_mul_f32_e64 v4, |v1|, s6 ; SI-NEXT: v_ashrrev_i32_e32 v5, 31, v1 ; SI-NEXT: v_floor_f32_e32 v2, v2 ; SI-NEXT: v_floor_f32_e32 v4, v4 ; SI-NEXT: v_cvt_u32_f32_e32 v6, v2 -; SI-NEXT: v_fma_f32 v0, v2, s9, |v0| +; SI-NEXT: v_fma_f32 v0, v2, s7, |v0| ; SI-NEXT: v_cvt_u32_f32_e32 v2, v4 -; SI-NEXT: v_fma_f32 v1, v4, s9, |v1| +; SI-NEXT: v_fma_f32 v1, v4, s7, |v1| ; SI-NEXT: v_cvt_u32_f32_e32 v0, v0 ; SI-NEXT: v_xor_b32_e32 v4, v6, v3 ; SI-NEXT: v_cvt_u32_f32_e32 v1, v1 @@ -330,36 +326,35 @@ define amdgpu_kernel void @fp_to_sint_v2i64(ptr addrspace(1) %out, <2 x float> % ; VI-LABEL: fp_to_sint_v2i64: ; VI: ; %bb.0: ; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24 -; VI-NEXT: s_mov_b32 s8, 0x2f800000 -; VI-NEXT: s_mov_b32 s7, 0xf000 -; VI-NEXT: s_mov_b32 s6, -1 +; VI-NEXT: s_mov_b32 s6, 0x2f800000 +; VI-NEXT: s_mov_b32 s7, 0xcf800000 ; VI-NEXT: s_waitcnt lgkmcnt(0) -; VI-NEXT: v_trunc_f32_e32 v0, s3 -; VI-NEXT: v_mul_f32_e64 v1, |v0|, s8 -; VI-NEXT: s_mov_b32 s4, s0 +; VI-NEXT: s_mov_b64 s[4:5], s[2:3] +; VI-NEXT: v_trunc_f32_e32 v0, s5 +; VI-NEXT: v_mul_f32_e64 v1, |v0|, s6 ; VI-NEXT: v_floor_f32_e32 v1, v1 -; VI-NEXT: s_mov_b32 s0, 0xcf800000 -; VI-NEXT: v_fma_f32 v2, v1, s0, |v0| -; VI-NEXT: v_trunc_f32_e32 v4, s2 -; VI-NEXT: v_cvt_u32_f32_e32 v2, v2 -; VI-NEXT: v_mul_f32_e64 v3, |v4|, s8 -; VI-NEXT: v_cvt_u32_f32_e32 v1, v1 -; VI-NEXT: v_floor_f32_e32 v3, v3 -; VI-NEXT: v_cvt_u32_f32_e32 v5, v3 -; VI-NEXT: v_fma_f32 v3, v3, s0, |v4| +; VI-NEXT: v_cvt_u32_f32_e32 v2, v1 +; VI-NEXT: v_fma_f32 v1, v1, s7, |v0| ; VI-NEXT: v_ashrrev_i32_e32 v0, 31, v0 -; VI-NEXT: v_cvt_u32_f32_e32 v6, v3 -; VI-NEXT: v_xor_b32_e32 v2, v2, v0 +; VI-NEXT: v_trunc_f32_e32 v4, s4 +; VI-NEXT: v_xor_b32_e32 v3, v2, 
v0 +; VI-NEXT: v_mul_f32_e64 v2, |v4|, s6 +; VI-NEXT: v_cvt_u32_f32_e32 v1, v1 +; VI-NEXT: v_floor_f32_e32 v2, v2 +; VI-NEXT: v_cvt_u32_f32_e32 v5, v2 +; VI-NEXT: v_fma_f32 v2, v2, s7, |v4| +; VI-NEXT: v_cvt_u32_f32_e32 v6, v2 ; VI-NEXT: v_xor_b32_e32 v1, v1, v0 -; VI-NEXT: v_sub_u32_e32 v2, vcc, v2, v0 -; VI-NEXT: v_subb_u32_e32 v3, vcc, v1, v0, vcc +; VI-NEXT: v_sub_u32_e32 v2, vcc, v1, v0 ; VI-NEXT: v_ashrrev_i32_e32 v1, 31, v4 +; VI-NEXT: v_subb_u32_e32 v3, vcc, v3, v0, vcc ; VI-NEXT: v_xor_b32_e32 v0, v6, v1 ; VI-NEXT: v_xor_b32_e32 v4, v5, v1 ; VI-NEXT: v_sub_u32_e32 v0, vcc, v0, v1 -; VI-NEXT: s_mov_b32 s5, s1 +; VI-NEXT: s_mov_b32 s3, 0xf000 +; VI-NEXT: s_mov_b32 s2, -1 ; VI-NEXT: v_subb_u32_e32 v1, vcc, v4, v1, vcc -; VI-NEXT: buffer_store_dwordx4 v[0:3], off, s[4:7], 0 +; VI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 ; VI-NEXT: s_endpgm ; ; EG-LABEL: fp_to_sint_v2i64: diff --git a/llvm/test/CodeGen/AMDGPU/fp_to_uint.ll b/llvm/test/CodeGen/AMDGPU/fp_to_uint.ll index 5428ba88975bc..c938475ab7675 100644 --- a/llvm/test/CodeGen/AMDGPU/fp_to_uint.ll +++ b/llvm/test/CodeGen/AMDGPU/fp_to_uint.ll @@ -48,27 +48,24 @@ define amdgpu_kernel void @fp_to_uint_v2f32_to_v2i32(ptr addrspace(1) %out, <2 x ; SI-LABEL: fp_to_uint_v2f32_to_v2i32: ; SI: ; %bb.0: ; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9 -; SI-NEXT: s_mov_b32 s7, 0xf000 -; SI-NEXT: s_mov_b32 s6, -1 ; SI-NEXT: s_waitcnt lgkmcnt(0) -; SI-NEXT: s_mov_b32 s4, s0 -; SI-NEXT: s_mov_b32 s5, s1 -; SI-NEXT: v_cvt_u32_f32_e32 v1, s3 -; SI-NEXT: v_cvt_u32_f32_e32 v0, s2 -; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0 +; SI-NEXT: s_mov_b64 s[4:5], s[2:3] +; SI-NEXT: s_mov_b32 s3, 0xf000 +; SI-NEXT: s_mov_b32 s2, -1 +; SI-NEXT: v_cvt_u32_f32_e32 v1, s5 +; SI-NEXT: v_cvt_u32_f32_e32 v0, s4 +; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0 ; SI-NEXT: s_endpgm ; ; VI-LABEL: fp_to_uint_v2f32_to_v2i32: ; VI: ; %bb.0: ; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24 -; VI-NEXT: s_mov_b32 s7, 0xf000 -; VI-NEXT: s_mov_b32 s6, -1 ; VI-NEXT: s_waitcnt lgkmcnt(0) ; VI-NEXT: v_cvt_u32_f32_e32 v1, s3 ; VI-NEXT: v_cvt_u32_f32_e32 v0, s2 -; VI-NEXT: s_mov_b32 s4, s0 -; VI-NEXT: s_mov_b32 s5, s1 -; VI-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0 +; VI-NEXT: s_mov_b32 s3, 0xf000 +; VI-NEXT: s_mov_b32 s2, -1 +; VI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0 ; VI-NEXT: s_endpgm ; ; EG-LABEL: fp_to_uint_v2f32_to_v2i32: @@ -241,32 +238,29 @@ define amdgpu_kernel void @fp_to_uint_v2f32_to_v2i64(ptr addrspace(1) %out, <2 x ; SI-LABEL: fp_to_uint_v2f32_to_v2i64: ; SI: ; %bb.0: ; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9 -; SI-NEXT: s_mov_b32 s7, 0xf000 -; SI-NEXT: s_mov_b32 s6, -1 -; SI-NEXT: s_mov_b32 s8, 0xcf800000 +; SI-NEXT: s_mov_b32 s6, 0xcf800000 ; SI-NEXT: s_waitcnt lgkmcnt(0) -; SI-NEXT: s_mov_b32 s4, s0 -; SI-NEXT: s_mov_b32 s5, s1 -; SI-NEXT: v_trunc_f32_e32 v0, s3 -; SI-NEXT: v_trunc_f32_e32 v2, s2 +; SI-NEXT: s_mov_b64 s[4:5], s[2:3] +; SI-NEXT: s_mov_b32 s3, 0xf000 +; SI-NEXT: s_mov_b32 s2, -1 +; SI-NEXT: v_trunc_f32_e32 v0, s5 +; SI-NEXT: v_trunc_f32_e32 v2, s4 ; SI-NEXT: v_mul_f32_e32 v1, 0x2f800000, v0 ; SI-NEXT: v_mul_f32_e32 v3, 0x2f800000, v2 ; SI-NEXT: v_floor_f32_e32 v4, v1 ; SI-NEXT: v_floor_f32_e32 v5, v3 ; SI-NEXT: v_cvt_u32_f32_e32 v3, v4 ; SI-NEXT: v_cvt_u32_f32_e32 v1, v5 -; SI-NEXT: v_fma_f32 v0, v4, s8, v0 -; SI-NEXT: v_fma_f32 v4, v5, s8, v2 +; SI-NEXT: v_fma_f32 v0, v4, s6, v0 +; SI-NEXT: v_fma_f32 v4, v5, s6, v2 ; SI-NEXT: v_cvt_u32_f32_e32 v2, v0 ; SI-NEXT: v_cvt_u32_f32_e32 v0, v4 -; SI-NEXT: 
buffer_store_dwordx4 v[0:3], off, s[4:7], 0 +; SI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 ; SI-NEXT: s_endpgm ; ; VI-LABEL: fp_to_uint_v2f32_to_v2i64: ; VI: ; %bb.0: ; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24 -; VI-NEXT: s_mov_b32 s7, 0xf000 -; VI-NEXT: s_mov_b32 s6, -1 ; VI-NEXT: s_waitcnt lgkmcnt(0) ; VI-NEXT: v_trunc_f32_e32 v0, s3 ; VI-NEXT: v_trunc_f32_e32 v4, s2 @@ -281,9 +275,9 @@ define amdgpu_kernel void @fp_to_uint_v2f32_to_v2i64(ptr addrspace(1) %out, <2 x ; VI-NEXT: v_cvt_u32_f32_e32 v3, v5 ; VI-NEXT: v_cvt_u32_f32_e32 v1, v6 ; VI-NEXT: v_cvt_u32_f32_e32 v0, v0 -; VI-NEXT: s_mov_b32 s4, s0 -; VI-NEXT: s_mov_b32 s5, s1 -; VI-NEXT: buffer_store_dwordx4 v[0:3], off, s[4:7], 0 +; VI-NEXT: s_mov_b32 s3, 0xf000 +; VI-NEXT: s_mov_b32 s2, -1 +; VI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 ; VI-NEXT: s_endpgm ; ; EG-LABEL: fp_to_uint_v2f32_to_v2i64: diff --git a/llvm/test/CodeGen/AMDGPU/fshl.ll b/llvm/test/CodeGen/AMDGPU/fshl.ll index ed1ee4527ed89..68b95cd9adbf3 100644 --- a/llvm/test/CodeGen/AMDGPU/fshl.ll +++ b/llvm/test/CodeGen/AMDGPU/fshl.ll @@ -691,17 +691,16 @@ define amdgpu_kernel void @orxor2or1(ptr addrspace(1) %in, i32 %a, i32 %b) { ; SI-LABEL: orxor2or1: ; SI: ; %bb.0: ; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9 -; SI-NEXT: s_mov_b32 s7, 0xf000 -; SI-NEXT: s_mov_b32 s6, -1 ; SI-NEXT: s_waitcnt lgkmcnt(0) -; SI-NEXT: s_mov_b32 s4, s0 -; SI-NEXT: s_lshl_b32 s0, s2, 7 -; SI-NEXT: s_or_b32 s0, s3, s0 -; SI-NEXT: s_cmp_eq_u32 s0, 0 -; SI-NEXT: s_cselect_b32 s0, s2, s3 -; SI-NEXT: s_mov_b32 s5, s1 -; SI-NEXT: v_mov_b32_e32 v0, s0 -; SI-NEXT: buffer_store_dword v0, off, s[4:7], 0 +; SI-NEXT: s_mov_b64 s[4:5], s[2:3] +; SI-NEXT: s_lshl_b32 s6, s4, 7 +; SI-NEXT: s_or_b32 s6, s5, s6 +; SI-NEXT: s_cmp_eq_u32 s6, 0 +; SI-NEXT: s_cselect_b32 s4, s4, s5 +; SI-NEXT: s_mov_b32 s3, 0xf000 +; SI-NEXT: s_mov_b32 s2, -1 +; SI-NEXT: v_mov_b32_e32 v0, s4 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0 ; SI-NEXT: s_endpgm ; ; VI-LABEL: orxor2or1: diff --git a/llvm/test/CodeGen/AMDGPU/insert_vector_elt.ll b/llvm/test/CodeGen/AMDGPU/insert_vector_elt.ll index 44bd4090436ef..7cbf9aeacfe48 100644 --- a/llvm/test/CodeGen/AMDGPU/insert_vector_elt.ll +++ b/llvm/test/CodeGen/AMDGPU/insert_vector_elt.ll @@ -1508,35 +1508,33 @@ define amdgpu_kernel void @dynamic_insertelement_v2i16(ptr addrspace(1) %out, <2 ; SI-LABEL: dynamic_insertelement_v2i16: ; SI: ; %bb.0: ; SI-NEXT: s_load_dwordx4 s[0:3], s[8:9], 0x0 -; SI-NEXT: s_mov_b32 s7, 0x100f000 -; SI-NEXT: s_mov_b32 s6, -1 ; SI-NEXT: s_waitcnt lgkmcnt(0) -; SI-NEXT: s_mov_b32 s5, s1 -; SI-NEXT: s_lshl_b32 s1, s3, 4 -; SI-NEXT: s_mov_b32 s4, s0 -; SI-NEXT: s_xor_b32 s0, s2, 0x50005 -; SI-NEXT: s_lshl_b32 s1, 0xffff, s1 -; SI-NEXT: s_and_b32 s0, s0, s1 -; SI-NEXT: s_xor_b32 s0, s0, s2 -; SI-NEXT: v_mov_b32_e32 v0, s0 -; SI-NEXT: buffer_store_dword v0, off, s[4:7], 0 +; SI-NEXT: s_mov_b64 s[4:5], s[2:3] +; SI-NEXT: s_lshl_b32 s5, s5, 4 +; SI-NEXT: s_xor_b32 s6, s4, 0x50005 +; SI-NEXT: s_lshl_b32 s5, 0xffff, s5 +; SI-NEXT: s_and_b32 s5, s6, s5 +; SI-NEXT: s_xor_b32 s4, s5, s4 +; SI-NEXT: s_mov_b32 s3, 0x100f000 +; SI-NEXT: s_mov_b32 s2, -1 +; SI-NEXT: v_mov_b32_e32 v0, s4 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0 ; SI-NEXT: s_endpgm ; ; VI-LABEL: dynamic_insertelement_v2i16: ; VI: ; %bb.0: ; VI-NEXT: s_load_dwordx4 s[0:3], s[8:9], 0x0 -; VI-NEXT: s_mov_b32 s7, 0x1100f000 -; VI-NEXT: s_mov_b32 s6, -1 ; VI-NEXT: s_waitcnt lgkmcnt(0) -; VI-NEXT: s_mov_b32 s5, s1 -; VI-NEXT: s_lshl_b32 s1, s3, 4 -; VI-NEXT: s_mov_b32 s4, s0 -; 
VI-NEXT: s_xor_b32 s0, s2, 0x50005 -; VI-NEXT: s_lshl_b32 s1, 0xffff, s1 -; VI-NEXT: s_and_b32 s0, s0, s1 -; VI-NEXT: s_xor_b32 s0, s0, s2 -; VI-NEXT: v_mov_b32_e32 v0, s0 -; VI-NEXT: buffer_store_dword v0, off, s[4:7], 0 +; VI-NEXT: s_mov_b64 s[4:5], s[2:3] +; VI-NEXT: s_lshl_b32 s5, s5, 4 +; VI-NEXT: s_xor_b32 s6, s4, 0x50005 +; VI-NEXT: s_lshl_b32 s5, 0xffff, s5 +; VI-NEXT: s_and_b32 s5, s6, s5 +; VI-NEXT: s_xor_b32 s4, s5, s4 +; VI-NEXT: s_mov_b32 s3, 0x1100f000 +; VI-NEXT: s_mov_b32 s2, -1 +; VI-NEXT: v_mov_b32_e32 v0, s4 +; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0 ; VI-NEXT: s_endpgm %vecins = insertelement <2 x i16> %a, i16 5, i32 %b store <2 x i16> %vecins, ptr addrspace(1) %out, align 8 diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.ubfe.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.ubfe.ll index d4aa2051dc28a..e421e2c8ebfc4 100644 --- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.ubfe.ll +++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.ubfe.ll @@ -1612,29 +1612,27 @@ define amdgpu_kernel void @v_lshr_and(ptr addrspace(1) %out, i32 %a, i32 %b) #0 ; SI-LABEL: v_lshr_and: ; SI: ; %bb.0: ; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9 -; SI-NEXT: s_mov_b32 s7, 0xf000 ; SI-NEXT: s_waitcnt lgkmcnt(0) -; SI-NEXT: s_lshr_b32 s2, s2, s3 -; SI-NEXT: s_and_b32 s2, s2, 7 -; SI-NEXT: s_mov_b32 s6, -1 -; SI-NEXT: s_mov_b32 s4, s0 -; SI-NEXT: s_mov_b32 s5, s1 -; SI-NEXT: v_mov_b32_e32 v0, s2 -; SI-NEXT: buffer_store_dword v0, off, s[4:7], 0 +; SI-NEXT: s_mov_b64 s[4:5], s[2:3] +; SI-NEXT: s_mov_b32 s3, 0xf000 +; SI-NEXT: s_lshr_b32 s2, s4, s5 +; SI-NEXT: s_and_b32 s4, s2, 7 +; SI-NEXT: s_mov_b32 s2, -1 +; SI-NEXT: v_mov_b32_e32 v0, s4 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0 ; SI-NEXT: s_endpgm ; ; VI-LABEL: v_lshr_and: ; VI: ; %bb.0: ; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24 -; VI-NEXT: s_mov_b32 s7, 0xf000 -; VI-NEXT: s_mov_b32 s6, -1 ; VI-NEXT: s_waitcnt lgkmcnt(0) -; VI-NEXT: s_mov_b32 s4, s0 -; VI-NEXT: s_lshr_b32 s0, s2, s3 -; VI-NEXT: s_and_b32 s0, s0, 7 -; VI-NEXT: s_mov_b32 s5, s1 -; VI-NEXT: v_mov_b32_e32 v0, s0 -; VI-NEXT: buffer_store_dword v0, off, s[4:7], 0 +; VI-NEXT: s_mov_b64 s[4:5], s[2:3] +; VI-NEXT: s_lshr_b32 s4, s4, s5 +; VI-NEXT: s_and_b32 s4, s4, 7 +; VI-NEXT: s_mov_b32 s3, 0xf000 +; VI-NEXT: s_mov_b32 s2, -1 +; VI-NEXT: v_mov_b32_e32 v0, s4 +; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0 ; VI-NEXT: s_endpgm %c = lshr i32 %a, %b %d = and i32 %c, 7 diff --git a/llvm/test/CodeGen/AMDGPU/llvm.exp.ll b/llvm/test/CodeGen/AMDGPU/llvm.exp.ll index ac356fad5b2da..3897a0e028334 100644 --- a/llvm/test/CodeGen/AMDGPU/llvm.exp.ll +++ b/llvm/test/CodeGen/AMDGPU/llvm.exp.ll @@ -520,42 +520,41 @@ define amdgpu_kernel void @s_exp_v2f32(ptr addrspace(1) %out, <2 x float> %in) { ; ; SI-SDAG-LABEL: s_exp_v2f32: ; SI-SDAG: ; %bb.0: -; SI-SDAG-NEXT: s_load_dwordx4 s[4:7], s[4:5], 0x9 +; SI-SDAG-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9 ; SI-SDAG-NEXT: v_mov_b32_e32 v0, 0x3fb8aa3b ; SI-SDAG-NEXT: v_mov_b32_e32 v1, 0x32a5705f -; SI-SDAG-NEXT: s_mov_b32 s3, 0xf000 -; SI-SDAG-NEXT: s_mov_b32 s2, -1 ; SI-SDAG-NEXT: s_waitcnt lgkmcnt(0) -; SI-SDAG-NEXT: v_mul_f32_e32 v2, s7, v0 +; SI-SDAG-NEXT: s_mov_b64 s[4:5], s[2:3] +; SI-SDAG-NEXT: v_mul_f32_e32 v2, s5, v0 ; SI-SDAG-NEXT: v_rndne_f32_e32 v3, v2 -; SI-SDAG-NEXT: v_fma_f32 v4, s7, v0, -v2 +; SI-SDAG-NEXT: v_fma_f32 v4, s5, v0, -v2 ; SI-SDAG-NEXT: v_sub_f32_e32 v2, v2, v3 -; SI-SDAG-NEXT: v_fma_f32 v4, s7, v1, v4 +; SI-SDAG-NEXT: v_fma_f32 v4, s5, v1, v4 ; SI-SDAG-NEXT: v_add_f32_e32 v2, v2, v4 -; SI-SDAG-NEXT: v_mul_f32_e32 v5, s6, v0 -; 
SI-SDAG-NEXT: v_exp_f32_e32 v2, v2 +; SI-SDAG-NEXT: v_mul_f32_e32 v5, s4, v0 ; SI-SDAG-NEXT: v_cvt_i32_f32_e32 v3, v3 +; SI-SDAG-NEXT: v_exp_f32_e32 v2, v2 ; SI-SDAG-NEXT: v_rndne_f32_e32 v6, v5 -; SI-SDAG-NEXT: v_fma_f32 v0, s6, v0, -v5 +; SI-SDAG-NEXT: v_fma_f32 v0, s4, v0, -v5 ; SI-SDAG-NEXT: v_sub_f32_e32 v7, v5, v6 -; SI-SDAG-NEXT: v_fma_f32 v0, s6, v1, v0 +; SI-SDAG-NEXT: v_fma_f32 v0, s4, v1, v0 ; SI-SDAG-NEXT: v_add_f32_e32 v0, v7, v0 ; SI-SDAG-NEXT: v_exp_f32_e32 v0, v0 ; SI-SDAG-NEXT: v_cvt_i32_f32_e32 v5, v6 ; SI-SDAG-NEXT: v_ldexp_f32_e32 v2, v2, v3 ; SI-SDAG-NEXT: v_mov_b32_e32 v3, 0xc2ce8ed0 -; SI-SDAG-NEXT: v_cmp_nlt_f32_e32 vcc, s7, v3 +; SI-SDAG-NEXT: v_cmp_nlt_f32_e32 vcc, s5, v3 ; SI-SDAG-NEXT: v_mov_b32_e32 v4, 0x42b17218 ; SI-SDAG-NEXT: v_cndmask_b32_e32 v2, 0, v2, vcc ; SI-SDAG-NEXT: v_mov_b32_e32 v6, 0x7f800000 -; SI-SDAG-NEXT: v_cmp_ngt_f32_e32 vcc, s7, v4 +; SI-SDAG-NEXT: v_cmp_ngt_f32_e32 vcc, s5, v4 ; SI-SDAG-NEXT: v_cndmask_b32_e32 v1, v6, v2, vcc ; SI-SDAG-NEXT: v_ldexp_f32_e32 v0, v0, v5 -; SI-SDAG-NEXT: v_cmp_nlt_f32_e32 vcc, s6, v3 +; SI-SDAG-NEXT: v_cmp_nlt_f32_e32 vcc, s4, v3 ; SI-SDAG-NEXT: v_cndmask_b32_e32 v0, 0, v0, vcc -; SI-SDAG-NEXT: v_cmp_ngt_f32_e32 vcc, s6, v4 -; SI-SDAG-NEXT: s_mov_b32 s0, s4 -; SI-SDAG-NEXT: s_mov_b32 s1, s5 +; SI-SDAG-NEXT: v_cmp_ngt_f32_e32 vcc, s4, v4 +; SI-SDAG-NEXT: s_mov_b32 s3, 0xf000 +; SI-SDAG-NEXT: s_mov_b32 s2, -1 ; SI-SDAG-NEXT: v_cndmask_b32_e32 v0, v6, v0, vcc ; SI-SDAG-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0 ; SI-SDAG-NEXT: s_endpgm diff --git a/llvm/test/CodeGen/AMDGPU/llvm.exp10.ll b/llvm/test/CodeGen/AMDGPU/llvm.exp10.ll index d12ebe49814d8..3928ec2dd76d3 100644 --- a/llvm/test/CodeGen/AMDGPU/llvm.exp10.ll +++ b/llvm/test/CodeGen/AMDGPU/llvm.exp10.ll @@ -522,42 +522,41 @@ define amdgpu_kernel void @s_exp10_v2f32(ptr addrspace(1) %out, <2 x float> %in) ; ; SI-SDAG-LABEL: s_exp10_v2f32: ; SI-SDAG: ; %bb.0: -; SI-SDAG-NEXT: s_load_dwordx4 s[4:7], s[4:5], 0x9 +; SI-SDAG-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9 ; SI-SDAG-NEXT: v_mov_b32_e32 v0, 0x40549a78 ; SI-SDAG-NEXT: v_mov_b32_e32 v1, 0x33979a37 -; SI-SDAG-NEXT: s_mov_b32 s3, 0xf000 -; SI-SDAG-NEXT: s_mov_b32 s2, -1 ; SI-SDAG-NEXT: s_waitcnt lgkmcnt(0) -; SI-SDAG-NEXT: v_mul_f32_e32 v2, s7, v0 +; SI-SDAG-NEXT: s_mov_b64 s[4:5], s[2:3] +; SI-SDAG-NEXT: v_mul_f32_e32 v2, s5, v0 ; SI-SDAG-NEXT: v_rndne_f32_e32 v3, v2 -; SI-SDAG-NEXT: v_fma_f32 v4, s7, v0, -v2 +; SI-SDAG-NEXT: v_fma_f32 v4, s5, v0, -v2 ; SI-SDAG-NEXT: v_sub_f32_e32 v2, v2, v3 -; SI-SDAG-NEXT: v_fma_f32 v4, s7, v1, v4 +; SI-SDAG-NEXT: v_fma_f32 v4, s5, v1, v4 ; SI-SDAG-NEXT: v_add_f32_e32 v2, v2, v4 -; SI-SDAG-NEXT: v_mul_f32_e32 v5, s6, v0 -; SI-SDAG-NEXT: v_exp_f32_e32 v2, v2 +; SI-SDAG-NEXT: v_mul_f32_e32 v5, s4, v0 ; SI-SDAG-NEXT: v_cvt_i32_f32_e32 v3, v3 +; SI-SDAG-NEXT: v_exp_f32_e32 v2, v2 ; SI-SDAG-NEXT: v_rndne_f32_e32 v6, v5 -; SI-SDAG-NEXT: v_fma_f32 v0, s6, v0, -v5 +; SI-SDAG-NEXT: v_fma_f32 v0, s4, v0, -v5 ; SI-SDAG-NEXT: v_sub_f32_e32 v7, v5, v6 -; SI-SDAG-NEXT: v_fma_f32 v0, s6, v1, v0 +; SI-SDAG-NEXT: v_fma_f32 v0, s4, v1, v0 ; SI-SDAG-NEXT: v_add_f32_e32 v0, v7, v0 ; SI-SDAG-NEXT: v_exp_f32_e32 v0, v0 ; SI-SDAG-NEXT: v_cvt_i32_f32_e32 v5, v6 ; SI-SDAG-NEXT: v_ldexp_f32_e32 v2, v2, v3 ; SI-SDAG-NEXT: v_mov_b32_e32 v3, 0xc23369f4 -; SI-SDAG-NEXT: v_cmp_nlt_f32_e32 vcc, s7, v3 +; SI-SDAG-NEXT: v_cmp_nlt_f32_e32 vcc, s5, v3 ; SI-SDAG-NEXT: v_mov_b32_e32 v4, 0x421a209b ; SI-SDAG-NEXT: v_cndmask_b32_e32 v2, 0, v2, vcc ; SI-SDAG-NEXT: v_mov_b32_e32 v6, 0x7f800000 -; SI-SDAG-NEXT: 
v_cmp_ngt_f32_e32 vcc, s7, v4 +; SI-SDAG-NEXT: v_cmp_ngt_f32_e32 vcc, s5, v4 ; SI-SDAG-NEXT: v_cndmask_b32_e32 v1, v6, v2, vcc ; SI-SDAG-NEXT: v_ldexp_f32_e32 v0, v0, v5 -; SI-SDAG-NEXT: v_cmp_nlt_f32_e32 vcc, s6, v3 +; SI-SDAG-NEXT: v_cmp_nlt_f32_e32 vcc, s4, v3 ; SI-SDAG-NEXT: v_cndmask_b32_e32 v0, 0, v0, vcc -; SI-SDAG-NEXT: v_cmp_ngt_f32_e32 vcc, s6, v4 -; SI-SDAG-NEXT: s_mov_b32 s0, s4 -; SI-SDAG-NEXT: s_mov_b32 s1, s5 +; SI-SDAG-NEXT: v_cmp_ngt_f32_e32 vcc, s4, v4 +; SI-SDAG-NEXT: s_mov_b32 s3, 0xf000 +; SI-SDAG-NEXT: s_mov_b32 s2, -1 ; SI-SDAG-NEXT: v_cndmask_b32_e32 v0, v6, v0, vcc ; SI-SDAG-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0 ; SI-SDAG-NEXT: s_endpgm diff --git a/llvm/test/CodeGen/AMDGPU/llvm.exp2.ll b/llvm/test/CodeGen/AMDGPU/llvm.exp2.ll index e30a58699fadb..dd44a1a35067e 100644 --- a/llvm/test/CodeGen/AMDGPU/llvm.exp2.ll +++ b/llvm/test/CodeGen/AMDGPU/llvm.exp2.ll @@ -176,26 +176,25 @@ define amdgpu_kernel void @s_exp2_v2f32(ptr addrspace(1) %out, <2 x float> %in) ; SI-SDAG-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9 ; SI-SDAG-NEXT: v_mov_b32_e32 v0, 0xc2fc0000 ; SI-SDAG-NEXT: v_mov_b32_e32 v1, 0x42800000 -; SI-SDAG-NEXT: s_mov_b32 s7, 0xf000 -; SI-SDAG-NEXT: s_mov_b32 s6, -1 ; SI-SDAG-NEXT: s_waitcnt lgkmcnt(0) -; SI-SDAG-NEXT: v_cmp_lt_f32_e32 vcc, s3, v0 +; SI-SDAG-NEXT: s_mov_b64 s[4:5], s[2:3] +; SI-SDAG-NEXT: v_cmp_lt_f32_e32 vcc, s5, v0 ; SI-SDAG-NEXT: v_cndmask_b32_e32 v2, 0, v1, vcc -; SI-SDAG-NEXT: s_mov_b32 s4, s0 -; SI-SDAG-NEXT: s_mov_b32 s5, s1 -; SI-SDAG-NEXT: s_and_b64 s[0:1], vcc, exec -; SI-SDAG-NEXT: v_add_f32_e32 v2, s3, v2 -; SI-SDAG-NEXT: v_cmp_lt_f32_e32 vcc, s2, v0 -; SI-SDAG-NEXT: v_exp_f32_e32 v2, v2 +; SI-SDAG-NEXT: s_and_b64 s[6:7], vcc, exec +; SI-SDAG-NEXT: v_cmp_lt_f32_e32 vcc, s4, v0 ; SI-SDAG-NEXT: v_cndmask_b32_e32 v0, 0, v1, vcc -; SI-SDAG-NEXT: v_add_f32_e32 v0, s2, v0 +; SI-SDAG-NEXT: v_add_f32_e32 v2, s5, v2 +; SI-SDAG-NEXT: v_add_f32_e32 v0, s4, v0 +; SI-SDAG-NEXT: v_exp_f32_e32 v2, v2 ; SI-SDAG-NEXT: v_exp_f32_e32 v0, v0 -; SI-SDAG-NEXT: s_cselect_b32 s0, 0xffffffc0, 0 -; SI-SDAG-NEXT: v_ldexp_f32_e64 v1, v2, s0 -; SI-SDAG-NEXT: s_and_b64 s[0:1], vcc, exec -; SI-SDAG-NEXT: s_cselect_b32 s0, 0xffffffc0, 0 -; SI-SDAG-NEXT: v_ldexp_f32_e64 v0, v0, s0 -; SI-SDAG-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0 +; SI-SDAG-NEXT: s_cselect_b32 s6, 0xffffffc0, 0 +; SI-SDAG-NEXT: s_and_b64 s[4:5], vcc, exec +; SI-SDAG-NEXT: s_cselect_b32 s4, 0xffffffc0, 0 +; SI-SDAG-NEXT: s_mov_b32 s3, 0xf000 +; SI-SDAG-NEXT: s_mov_b32 s2, -1 +; SI-SDAG-NEXT: v_ldexp_f32_e64 v1, v2, s6 +; SI-SDAG-NEXT: v_ldexp_f32_e64 v0, v0, s4 +; SI-SDAG-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0 ; SI-SDAG-NEXT: s_endpgm ; ; SI-GISEL-LABEL: s_exp2_v2f32: diff --git a/llvm/test/CodeGen/AMDGPU/llvm.log.ll b/llvm/test/CodeGen/AMDGPU/llvm.log.ll index b5038c8f606ab..fc6b2d95b2af8 100644 --- a/llvm/test/CodeGen/AMDGPU/llvm.log.ll +++ b/llvm/test/CodeGen/AMDGPU/llvm.log.ll @@ -321,39 +321,38 @@ define amdgpu_kernel void @s_log_f32(ptr addrspace(1) %out, float %in) { define amdgpu_kernel void @s_log_v2f32(ptr addrspace(1) %out, <2 x float> %in) { ; SI-SDAG-LABEL: s_log_v2f32: ; SI-SDAG: ; %bb.0: -; SI-SDAG-NEXT: s_load_dwordx4 s[4:7], s[4:5], 0x9 +; SI-SDAG-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9 ; SI-SDAG-NEXT: v_mov_b32_e32 v0, 0x800000 ; SI-SDAG-NEXT: v_mov_b32_e32 v1, 0x41b17218 -; SI-SDAG-NEXT: s_mov_b32 s8, 0x3377d1cf +; SI-SDAG-NEXT: s_mov_b32 s8, 0x3f317217 ; SI-SDAG-NEXT: s_mov_b32 s9, 0x7f800000 ; SI-SDAG-NEXT: s_waitcnt lgkmcnt(0) -; SI-SDAG-NEXT: 
v_cmp_lt_f32_e32 vcc, s7, v0 -; SI-SDAG-NEXT: s_and_b64 s[0:1], vcc, exec -; SI-SDAG-NEXT: s_cselect_b32 s0, 32, 0 -; SI-SDAG-NEXT: v_mov_b32_e32 v3, s0 -; SI-SDAG-NEXT: v_ldexp_f32_e32 v3, s7, v3 +; SI-SDAG-NEXT: s_mov_b64 s[4:5], s[2:3] +; SI-SDAG-NEXT: v_cmp_lt_f32_e32 vcc, s5, v0 +; SI-SDAG-NEXT: s_and_b64 s[2:3], vcc, exec +; SI-SDAG-NEXT: s_cselect_b32 s2, 32, 0 +; SI-SDAG-NEXT: v_mov_b32_e32 v3, s2 +; SI-SDAG-NEXT: v_ldexp_f32_e32 v3, s5, v3 ; SI-SDAG-NEXT: v_log_f32_e32 v3, v3 ; SI-SDAG-NEXT: v_cndmask_b32_e32 v2, 0, v1, vcc -; SI-SDAG-NEXT: v_cmp_lt_f32_e32 vcc, s6, v0 -; SI-SDAG-NEXT: s_mov_b32 s0, s4 -; SI-SDAG-NEXT: s_mov_b32 s1, s5 -; SI-SDAG-NEXT: s_and_b64 s[4:5], vcc, exec -; SI-SDAG-NEXT: s_mov_b32 s7, 0x3f317217 +; SI-SDAG-NEXT: v_cmp_lt_f32_e32 vcc, s4, v0 +; SI-SDAG-NEXT: s_and_b64 s[6:7], vcc, exec ; SI-SDAG-NEXT: v_mul_f32_e32 v4, 0x3f317217, v3 -; SI-SDAG-NEXT: s_cselect_b32 s4, 32, 0 -; SI-SDAG-NEXT: v_fma_f32 v5, v3, s7, -v4 +; SI-SDAG-NEXT: s_cselect_b32 s6, 32, 0 +; SI-SDAG-NEXT: s_mov_b32 s5, 0x3377d1cf +; SI-SDAG-NEXT: v_fma_f32 v5, v3, s8, -v4 ; SI-SDAG-NEXT: v_cndmask_b32_e32 v0, 0, v1, vcc -; SI-SDAG-NEXT: v_mov_b32_e32 v1, s4 -; SI-SDAG-NEXT: v_fma_f32 v5, v3, s8, v5 -; SI-SDAG-NEXT: v_ldexp_f32_e32 v1, s6, v1 +; SI-SDAG-NEXT: v_mov_b32_e32 v1, s6 +; SI-SDAG-NEXT: v_fma_f32 v5, v3, s5, v5 +; SI-SDAG-NEXT: v_ldexp_f32_e32 v1, s4, v1 ; SI-SDAG-NEXT: v_add_f32_e32 v4, v4, v5 ; SI-SDAG-NEXT: v_log_f32_e32 v5, v1 ; SI-SDAG-NEXT: v_cmp_lt_f32_e64 vcc, |v3|, s9 ; SI-SDAG-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc ; SI-SDAG-NEXT: v_sub_f32_e32 v1, v1, v2 ; SI-SDAG-NEXT: v_mul_f32_e32 v2, 0x3f317217, v5 -; SI-SDAG-NEXT: v_fma_f32 v3, v5, s7, -v2 -; SI-SDAG-NEXT: v_fma_f32 v3, v5, s8, v3 +; SI-SDAG-NEXT: v_fma_f32 v3, v5, s8, -v2 +; SI-SDAG-NEXT: v_fma_f32 v3, v5, s5, v3 ; SI-SDAG-NEXT: v_add_f32_e32 v2, v2, v3 ; SI-SDAG-NEXT: v_cmp_lt_f32_e64 vcc, |v5|, s9 ; SI-SDAG-NEXT: v_cndmask_b32_e32 v2, v5, v2, vcc diff --git a/llvm/test/CodeGen/AMDGPU/llvm.log10.ll b/llvm/test/CodeGen/AMDGPU/llvm.log10.ll index 7465b492d75ea..a141bceb3ce86 100644 --- a/llvm/test/CodeGen/AMDGPU/llvm.log10.ll +++ b/llvm/test/CodeGen/AMDGPU/llvm.log10.ll @@ -321,39 +321,38 @@ define amdgpu_kernel void @s_log10_f32(ptr addrspace(1) %out, float %in) { define amdgpu_kernel void @s_log10_v2f32(ptr addrspace(1) %out, <2 x float> %in) { ; SI-SDAG-LABEL: s_log10_v2f32: ; SI-SDAG: ; %bb.0: -; SI-SDAG-NEXT: s_load_dwordx4 s[4:7], s[4:5], 0x9 +; SI-SDAG-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9 ; SI-SDAG-NEXT: v_mov_b32_e32 v0, 0x800000 ; SI-SDAG-NEXT: v_mov_b32_e32 v1, 0x411a209b -; SI-SDAG-NEXT: s_mov_b32 s8, 0x3284fbcf +; SI-SDAG-NEXT: s_mov_b32 s8, 0x3e9a209a ; SI-SDAG-NEXT: s_mov_b32 s9, 0x7f800000 ; SI-SDAG-NEXT: s_waitcnt lgkmcnt(0) -; SI-SDAG-NEXT: v_cmp_lt_f32_e32 vcc, s7, v0 -; SI-SDAG-NEXT: s_and_b64 s[0:1], vcc, exec -; SI-SDAG-NEXT: s_cselect_b32 s0, 32, 0 -; SI-SDAG-NEXT: v_mov_b32_e32 v3, s0 -; SI-SDAG-NEXT: v_ldexp_f32_e32 v3, s7, v3 +; SI-SDAG-NEXT: s_mov_b64 s[4:5], s[2:3] +; SI-SDAG-NEXT: v_cmp_lt_f32_e32 vcc, s5, v0 +; SI-SDAG-NEXT: s_and_b64 s[2:3], vcc, exec +; SI-SDAG-NEXT: s_cselect_b32 s2, 32, 0 +; SI-SDAG-NEXT: v_mov_b32_e32 v3, s2 +; SI-SDAG-NEXT: v_ldexp_f32_e32 v3, s5, v3 ; SI-SDAG-NEXT: v_log_f32_e32 v3, v3 ; SI-SDAG-NEXT: v_cndmask_b32_e32 v2, 0, v1, vcc -; SI-SDAG-NEXT: v_cmp_lt_f32_e32 vcc, s6, v0 -; SI-SDAG-NEXT: s_mov_b32 s0, s4 -; SI-SDAG-NEXT: s_mov_b32 s1, s5 -; SI-SDAG-NEXT: s_and_b64 s[4:5], vcc, exec -; SI-SDAG-NEXT: s_mov_b32 s7, 0x3e9a209a +; SI-SDAG-NEXT: 
v_cmp_lt_f32_e32 vcc, s4, v0 +; SI-SDAG-NEXT: s_and_b64 s[6:7], vcc, exec ; SI-SDAG-NEXT: v_mul_f32_e32 v4, 0x3e9a209a, v3 -; SI-SDAG-NEXT: s_cselect_b32 s4, 32, 0 -; SI-SDAG-NEXT: v_fma_f32 v5, v3, s7, -v4 +; SI-SDAG-NEXT: s_cselect_b32 s6, 32, 0 +; SI-SDAG-NEXT: s_mov_b32 s5, 0x3284fbcf +; SI-SDAG-NEXT: v_fma_f32 v5, v3, s8, -v4 ; SI-SDAG-NEXT: v_cndmask_b32_e32 v0, 0, v1, vcc -; SI-SDAG-NEXT: v_mov_b32_e32 v1, s4 -; SI-SDAG-NEXT: v_fma_f32 v5, v3, s8, v5 -; SI-SDAG-NEXT: v_ldexp_f32_e32 v1, s6, v1 +; SI-SDAG-NEXT: v_mov_b32_e32 v1, s6 +; SI-SDAG-NEXT: v_fma_f32 v5, v3, s5, v5 +; SI-SDAG-NEXT: v_ldexp_f32_e32 v1, s4, v1 ; SI-SDAG-NEXT: v_add_f32_e32 v4, v4, v5 ; SI-SDAG-NEXT: v_log_f32_e32 v5, v1 ; SI-SDAG-NEXT: v_cmp_lt_f32_e64 vcc, |v3|, s9 ; SI-SDAG-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc ; SI-SDAG-NEXT: v_sub_f32_e32 v1, v1, v2 ; SI-SDAG-NEXT: v_mul_f32_e32 v2, 0x3e9a209a, v5 -; SI-SDAG-NEXT: v_fma_f32 v3, v5, s7, -v2 -; SI-SDAG-NEXT: v_fma_f32 v3, v5, s8, v3 +; SI-SDAG-NEXT: v_fma_f32 v3, v5, s8, -v2 +; SI-SDAG-NEXT: v_fma_f32 v3, v5, s5, v3 ; SI-SDAG-NEXT: v_add_f32_e32 v2, v2, v3 ; SI-SDAG-NEXT: v_cmp_lt_f32_e64 vcc, |v5|, s9 ; SI-SDAG-NEXT: v_cndmask_b32_e32 v2, v5, v2, vcc diff --git a/llvm/test/CodeGen/AMDGPU/llvm.log2.ll b/llvm/test/CodeGen/AMDGPU/llvm.log2.ll index 61a777f8877bb..b1407d39674ad 100644 --- a/llvm/test/CodeGen/AMDGPU/llvm.log2.ll +++ b/llvm/test/CodeGen/AMDGPU/llvm.log2.ll @@ -221,8 +221,6 @@ define amdgpu_kernel void @s_log2_v2f32(ptr addrspace(1) %out, <2 x float> %in) ; SI-SDAG-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9 ; SI-SDAG-NEXT: v_mov_b32_e32 v0, 0x800000 ; SI-SDAG-NEXT: v_mov_b32_e32 v1, 0x42000000 -; SI-SDAG-NEXT: s_mov_b32 s7, 0xf000 -; SI-SDAG-NEXT: s_mov_b32 s6, -1 ; SI-SDAG-NEXT: s_waitcnt lgkmcnt(0) ; SI-SDAG-NEXT: v_cmp_lt_f32_e32 vcc, s3, v0 ; SI-SDAG-NEXT: s_and_b64 s[4:5], vcc, exec @@ -238,11 +236,11 @@ define amdgpu_kernel void @s_log2_v2f32(ptr addrspace(1) %out, <2 x float> %in) ; SI-SDAG-NEXT: v_ldexp_f32_e32 v1, s2, v1 ; SI-SDAG-NEXT: v_log_f32_e32 v3, v3 ; SI-SDAG-NEXT: v_log_f32_e32 v4, v1 -; SI-SDAG-NEXT: s_mov_b32 s4, s0 -; SI-SDAG-NEXT: s_mov_b32 s5, s1 +; SI-SDAG-NEXT: s_mov_b32 s3, 0xf000 +; SI-SDAG-NEXT: s_mov_b32 s2, -1 ; SI-SDAG-NEXT: v_sub_f32_e32 v1, v3, v2 ; SI-SDAG-NEXT: v_sub_f32_e32 v0, v4, v0 -; SI-SDAG-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0 +; SI-SDAG-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0 ; SI-SDAG-NEXT: s_endpgm ; ; SI-GISEL-LABEL: s_log2_v2f32: diff --git a/llvm/test/CodeGen/AMDGPU/local-atomicrmw-fadd.ll b/llvm/test/CodeGen/AMDGPU/local-atomicrmw-fadd.ll index 6dc919988cc4f..b6eaaf1369ab4 100644 --- a/llvm/test/CodeGen/AMDGPU/local-atomicrmw-fadd.ll +++ b/llvm/test/CodeGen/AMDGPU/local-atomicrmw-fadd.ll @@ -326,12 +326,12 @@ define void @local_atomic_fadd_noret_f32(ptr addrspace(3) %ptr) nounwind { ; GFX7-NEXT: .LBB2_1: ; %atomicrmw.start ; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX7-NEXT: s_waitcnt lgkmcnt(0) -; GFX7-NEXT: v_add_f32_e32 v2, 4.0, v1 -; GFX7-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 +; GFX7-NEXT: v_mov_b32_e32 v2, v1 +; GFX7-NEXT: v_add_f32_e32 v1, 4.0, v2 +; GFX7-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 ; GFX7-NEXT: s_waitcnt lgkmcnt(0) -; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1 +; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2 ; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5] -; GFX7-NEXT: v_mov_b32_e32 v1, v2 ; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX7-NEXT: s_cbranch_execnz .LBB2_1 ; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end @@ -347,12 +347,12 @@ define void 
@local_atomic_fadd_noret_f32(ptr addrspace(3) %ptr) nounwind { ; GFX6-NEXT: .LBB2_1: ; %atomicrmw.start ; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX6-NEXT: s_waitcnt lgkmcnt(0) -; GFX6-NEXT: v_add_f32_e32 v2, 4.0, v1 -; GFX6-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 +; GFX6-NEXT: v_mov_b32_e32 v2, v1 +; GFX6-NEXT: v_add_f32_e32 v1, 4.0, v2 +; GFX6-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 ; GFX6-NEXT: s_waitcnt lgkmcnt(0) -; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1 +; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2 ; GFX6-NEXT: s_or_b64 s[4:5], vcc, s[4:5] -; GFX6-NEXT: v_mov_b32_e32 v1, v2 ; GFX6-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX6-NEXT: s_cbranch_execnz .LBB2_1 ; GFX6-NEXT: ; %bb.2: ; %atomicrmw.end @@ -440,12 +440,12 @@ define void @local_atomic_fadd_noret_f32__offset(ptr addrspace(3) %ptr) nounwind ; GFX7-NEXT: .LBB3_1: ; %atomicrmw.start ; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX7-NEXT: s_waitcnt lgkmcnt(0) -; GFX7-NEXT: v_add_f32_e32 v2, 4.0, v1 -; GFX7-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 offset:65532 +; GFX7-NEXT: v_mov_b32_e32 v2, v1 +; GFX7-NEXT: v_add_f32_e32 v1, 4.0, v2 +; GFX7-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 offset:65532 ; GFX7-NEXT: s_waitcnt lgkmcnt(0) -; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1 +; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2 ; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5] -; GFX7-NEXT: v_mov_b32_e32 v1, v2 ; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX7-NEXT: s_cbranch_execnz .LBB3_1 ; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end @@ -462,12 +462,12 @@ define void @local_atomic_fadd_noret_f32__offset(ptr addrspace(3) %ptr) nounwind ; GFX6-NEXT: .LBB3_1: ; %atomicrmw.start ; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX6-NEXT: s_waitcnt lgkmcnt(0) -; GFX6-NEXT: v_add_f32_e32 v2, 4.0, v1 -; GFX6-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 +; GFX6-NEXT: v_mov_b32_e32 v2, v1 +; GFX6-NEXT: v_add_f32_e32 v1, 4.0, v2 +; GFX6-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 ; GFX6-NEXT: s_waitcnt lgkmcnt(0) -; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1 +; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2 ; GFX6-NEXT: s_or_b64 s[4:5], vcc, s[4:5] -; GFX6-NEXT: v_mov_b32_e32 v1, v2 ; GFX6-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX6-NEXT: s_cbranch_execnz .LBB3_1 ; GFX6-NEXT: ; %bb.2: ; %atomicrmw.end @@ -880,13 +880,14 @@ define void @local_atomic_fadd_noret_f64(ptr addrspace(3) %ptr) nounwind { ; GFX12-NEXT: .LBB6_1: ; %atomicrmw.start ; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX12-NEXT: s_wait_dscnt 0x0 -; GFX12-NEXT: v_add_f64_e32 v[3:4], 4.0, v[1:2] +; GFX12-NEXT: v_dual_mov_b32 v4, v2 :: v_dual_mov_b32 v3, v1 +; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX12-NEXT: v_add_f64_e32 v[1:2], 4.0, v[3:4] ; GFX12-NEXT: s_wait_storecnt 0x0 -; GFX12-NEXT: ds_cmpstore_rtn_b64 v[3:4], v0, v[3:4], v[1:2] +; GFX12-NEXT: ds_cmpstore_rtn_b64 v[1:2], v0, v[1:2], v[3:4] ; GFX12-NEXT: s_wait_dscnt 0x0 ; GFX12-NEXT: global_inv scope:SCOPE_SE -; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[3:4], v[1:2] -; GFX12-NEXT: v_dual_mov_b32 v1, v3 :: v_dual_mov_b32 v2, v4 +; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[1:2], v[3:4] ; GFX12-NEXT: s_wait_alu 0xfffe ; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0 ; GFX12-NEXT: s_wait_alu 0xfffe @@ -913,13 +914,14 @@ define void @local_atomic_fadd_noret_f64(ptr addrspace(3) %ptr) nounwind { ; GFX11-NEXT: .LBB6_1: ; %atomicrmw.start ; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX11-NEXT: s_waitcnt lgkmcnt(0) -; GFX11-NEXT: v_add_f64 v[3:4], v[1:2], 4.0 +; GFX11-NEXT: v_dual_mov_b32 v4, v2 :: v_dual_mov_b32 v3, v1 +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) +; 
GFX11-NEXT: v_add_f64 v[1:2], v[3:4], 4.0 ; GFX11-NEXT: s_waitcnt_vscnt null, 0x0 -; GFX11-NEXT: ds_cmpstore_rtn_b64 v[3:4], v0, v[3:4], v[1:2] +; GFX11-NEXT: ds_cmpstore_rtn_b64 v[1:2], v0, v[1:2], v[3:4] ; GFX11-NEXT: s_waitcnt lgkmcnt(0) ; GFX11-NEXT: buffer_gl0_inv -; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[3:4], v[1:2] -; GFX11-NEXT: v_dual_mov_b32 v1, v3 :: v_dual_mov_b32 v2, v4 +; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[1:2], v[3:4] ; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0 ; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0 @@ -936,14 +938,14 @@ define void @local_atomic_fadd_noret_f64(ptr addrspace(3) %ptr) nounwind { ; GFX10-NEXT: .LBB6_1: ; %atomicrmw.start ; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX10-NEXT: s_waitcnt lgkmcnt(0) -; GFX10-NEXT: v_add_f64 v[3:4], v[1:2], 4.0 +; GFX10-NEXT: v_mov_b32_e32 v4, v2 +; GFX10-NEXT: v_mov_b32_e32 v3, v1 +; GFX10-NEXT: v_add_f64 v[1:2], v[3:4], 4.0 ; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 -; GFX10-NEXT: ds_cmpst_rtn_b64 v[3:4], v0, v[1:2], v[3:4] +; GFX10-NEXT: ds_cmpst_rtn_b64 v[1:2], v0, v[3:4], v[1:2] ; GFX10-NEXT: s_waitcnt lgkmcnt(0) ; GFX10-NEXT: buffer_gl0_inv -; GFX10-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[3:4], v[1:2] -; GFX10-NEXT: v_mov_b32_e32 v1, v3 -; GFX10-NEXT: v_mov_b32_e32 v2, v4 +; GFX10-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[1:2], v[3:4] ; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4 ; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4 ; GFX10-NEXT: s_cbranch_execnz .LBB6_1 @@ -968,13 +970,13 @@ define void @local_atomic_fadd_noret_f64(ptr addrspace(3) %ptr) nounwind { ; GFX908-NEXT: .LBB6_1: ; %atomicrmw.start ; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX908-NEXT: s_waitcnt lgkmcnt(0) -; GFX908-NEXT: v_add_f64 v[3:4], v[1:2], 4.0 -; GFX908-NEXT: ds_cmpst_rtn_b64 v[3:4], v0, v[1:2], v[3:4] +; GFX908-NEXT: v_mov_b32_e32 v4, v2 +; GFX908-NEXT: v_mov_b32_e32 v3, v1 +; GFX908-NEXT: v_add_f64 v[1:2], v[3:4], 4.0 +; GFX908-NEXT: ds_cmpst_rtn_b64 v[1:2], v0, v[3:4], v[1:2] ; GFX908-NEXT: s_waitcnt lgkmcnt(0) -; GFX908-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[1:2] -; GFX908-NEXT: v_mov_b32_e32 v1, v3 +; GFX908-NEXT: v_cmp_eq_u64_e32 vcc, v[1:2], v[3:4] ; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5] -; GFX908-NEXT: v_mov_b32_e32 v2, v4 ; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX908-NEXT: s_cbranch_execnz .LBB6_1 ; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end @@ -990,13 +992,13 @@ define void @local_atomic_fadd_noret_f64(ptr addrspace(3) %ptr) nounwind { ; GFX8-NEXT: .LBB6_1: ; %atomicrmw.start ; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX8-NEXT: s_waitcnt lgkmcnt(0) -; GFX8-NEXT: v_add_f64 v[3:4], v[1:2], 4.0 -; GFX8-NEXT: ds_cmpst_rtn_b64 v[3:4], v0, v[1:2], v[3:4] +; GFX8-NEXT: v_mov_b32_e32 v4, v2 +; GFX8-NEXT: v_mov_b32_e32 v3, v1 +; GFX8-NEXT: v_add_f64 v[1:2], v[3:4], 4.0 +; GFX8-NEXT: ds_cmpst_rtn_b64 v[1:2], v0, v[3:4], v[1:2] ; GFX8-NEXT: s_waitcnt lgkmcnt(0) -; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[1:2] -; GFX8-NEXT: v_mov_b32_e32 v1, v3 +; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[1:2], v[3:4] ; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5] -; GFX8-NEXT: v_mov_b32_e32 v2, v4 ; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX8-NEXT: s_cbranch_execnz .LBB6_1 ; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end @@ -1012,13 +1014,13 @@ define void @local_atomic_fadd_noret_f64(ptr addrspace(3) %ptr) nounwind { ; GFX7-NEXT: .LBB6_1: ; %atomicrmw.start ; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX7-NEXT: s_waitcnt lgkmcnt(0) -; GFX7-NEXT: v_add_f64 v[3:4], v[1:2], 4.0 -; GFX7-NEXT: ds_cmpst_rtn_b64 
v[3:4], v0, v[1:2], v[3:4] +; GFX7-NEXT: v_mov_b32_e32 v4, v2 +; GFX7-NEXT: v_mov_b32_e32 v3, v1 +; GFX7-NEXT: v_add_f64 v[1:2], v[3:4], 4.0 +; GFX7-NEXT: ds_cmpst_rtn_b64 v[1:2], v0, v[3:4], v[1:2] ; GFX7-NEXT: s_waitcnt lgkmcnt(0) -; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[1:2] -; GFX7-NEXT: v_mov_b32_e32 v1, v3 +; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[1:2], v[3:4] ; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5] -; GFX7-NEXT: v_mov_b32_e32 v2, v4 ; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX7-NEXT: s_cbranch_execnz .LBB6_1 ; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end @@ -1034,13 +1036,13 @@ define void @local_atomic_fadd_noret_f64(ptr addrspace(3) %ptr) nounwind { ; GFX6-NEXT: .LBB6_1: ; %atomicrmw.start ; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX6-NEXT: s_waitcnt lgkmcnt(0) -; GFX6-NEXT: v_add_f64 v[3:4], v[1:2], 4.0 -; GFX6-NEXT: ds_cmpst_rtn_b64 v[3:4], v0, v[1:2], v[3:4] +; GFX6-NEXT: v_mov_b32_e32 v4, v2 +; GFX6-NEXT: v_mov_b32_e32 v3, v1 +; GFX6-NEXT: v_add_f64 v[1:2], v[3:4], 4.0 +; GFX6-NEXT: ds_cmpst_rtn_b64 v[1:2], v0, v[3:4], v[1:2] ; GFX6-NEXT: s_waitcnt lgkmcnt(0) -; GFX6-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[1:2] -; GFX6-NEXT: v_mov_b32_e32 v1, v3 +; GFX6-NEXT: v_cmp_eq_u64_e32 vcc, v[1:2], v[3:4] ; GFX6-NEXT: s_or_b64 s[4:5], vcc, s[4:5] -; GFX6-NEXT: v_mov_b32_e32 v2, v4 ; GFX6-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX6-NEXT: s_cbranch_execnz .LBB6_1 ; GFX6-NEXT: ; %bb.2: ; %atomicrmw.end @@ -1063,13 +1065,14 @@ define void @local_atomic_fadd_noret_f64__offset(ptr addrspace(3) %ptr) nounwind ; GFX12-NEXT: .LBB7_1: ; %atomicrmw.start ; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX12-NEXT: s_wait_dscnt 0x0 -; GFX12-NEXT: v_add_f64_e32 v[3:4], 4.0, v[1:2] +; GFX12-NEXT: v_dual_mov_b32 v4, v2 :: v_dual_mov_b32 v3, v1 +; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX12-NEXT: v_add_f64_e32 v[1:2], 4.0, v[3:4] ; GFX12-NEXT: s_wait_storecnt 0x0 -; GFX12-NEXT: ds_cmpstore_rtn_b64 v[3:4], v0, v[3:4], v[1:2] offset:65528 +; GFX12-NEXT: ds_cmpstore_rtn_b64 v[1:2], v0, v[1:2], v[3:4] offset:65528 ; GFX12-NEXT: s_wait_dscnt 0x0 ; GFX12-NEXT: global_inv scope:SCOPE_SE -; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[3:4], v[1:2] -; GFX12-NEXT: v_dual_mov_b32 v1, v3 :: v_dual_mov_b32 v2, v4 +; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[1:2], v[3:4] ; GFX12-NEXT: s_wait_alu 0xfffe ; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0 ; GFX12-NEXT: s_wait_alu 0xfffe @@ -1096,13 +1099,14 @@ define void @local_atomic_fadd_noret_f64__offset(ptr addrspace(3) %ptr) nounwind ; GFX11-NEXT: .LBB7_1: ; %atomicrmw.start ; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX11-NEXT: s_waitcnt lgkmcnt(0) -; GFX11-NEXT: v_add_f64 v[3:4], v[1:2], 4.0 +; GFX11-NEXT: v_dual_mov_b32 v4, v2 :: v_dual_mov_b32 v3, v1 +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-NEXT: v_add_f64 v[1:2], v[3:4], 4.0 ; GFX11-NEXT: s_waitcnt_vscnt null, 0x0 -; GFX11-NEXT: ds_cmpstore_rtn_b64 v[3:4], v0, v[3:4], v[1:2] offset:65528 +; GFX11-NEXT: ds_cmpstore_rtn_b64 v[1:2], v0, v[1:2], v[3:4] offset:65528 ; GFX11-NEXT: s_waitcnt lgkmcnt(0) ; GFX11-NEXT: buffer_gl0_inv -; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[3:4], v[1:2] -; GFX11-NEXT: v_dual_mov_b32 v1, v3 :: v_dual_mov_b32 v2, v4 +; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[1:2], v[3:4] ; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0 ; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0 @@ -1119,14 +1123,14 @@ define void @local_atomic_fadd_noret_f64__offset(ptr addrspace(3) %ptr) nounwind ; GFX10-NEXT: .LBB7_1: ; %atomicrmw.start ; GFX10-NEXT: ; 
=>This Inner Loop Header: Depth=1 ; GFX10-NEXT: s_waitcnt lgkmcnt(0) -; GFX10-NEXT: v_add_f64 v[3:4], v[1:2], 4.0 +; GFX10-NEXT: v_mov_b32_e32 v4, v2 +; GFX10-NEXT: v_mov_b32_e32 v3, v1 +; GFX10-NEXT: v_add_f64 v[1:2], v[3:4], 4.0 ; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 -; GFX10-NEXT: ds_cmpst_rtn_b64 v[3:4], v0, v[1:2], v[3:4] offset:65528 +; GFX10-NEXT: ds_cmpst_rtn_b64 v[1:2], v0, v[3:4], v[1:2] offset:65528 ; GFX10-NEXT: s_waitcnt lgkmcnt(0) ; GFX10-NEXT: buffer_gl0_inv -; GFX10-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[3:4], v[1:2] -; GFX10-NEXT: v_mov_b32_e32 v1, v3 -; GFX10-NEXT: v_mov_b32_e32 v2, v4 +; GFX10-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[1:2], v[3:4] ; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4 ; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4 ; GFX10-NEXT: s_cbranch_execnz .LBB7_1 @@ -1151,13 +1155,13 @@ define void @local_atomic_fadd_noret_f64__offset(ptr addrspace(3) %ptr) nounwind ; GFX908-NEXT: .LBB7_1: ; %atomicrmw.start ; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX908-NEXT: s_waitcnt lgkmcnt(0) -; GFX908-NEXT: v_add_f64 v[3:4], v[1:2], 4.0 -; GFX908-NEXT: ds_cmpst_rtn_b64 v[3:4], v0, v[1:2], v[3:4] offset:65528 +; GFX908-NEXT: v_mov_b32_e32 v4, v2 +; GFX908-NEXT: v_mov_b32_e32 v3, v1 +; GFX908-NEXT: v_add_f64 v[1:2], v[3:4], 4.0 +; GFX908-NEXT: ds_cmpst_rtn_b64 v[1:2], v0, v[3:4], v[1:2] offset:65528 ; GFX908-NEXT: s_waitcnt lgkmcnt(0) -; GFX908-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[1:2] -; GFX908-NEXT: v_mov_b32_e32 v1, v3 +; GFX908-NEXT: v_cmp_eq_u64_e32 vcc, v[1:2], v[3:4] ; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5] -; GFX908-NEXT: v_mov_b32_e32 v2, v4 ; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX908-NEXT: s_cbranch_execnz .LBB7_1 ; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end @@ -1173,13 +1177,13 @@ define void @local_atomic_fadd_noret_f64__offset(ptr addrspace(3) %ptr) nounwind ; GFX8-NEXT: .LBB7_1: ; %atomicrmw.start ; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX8-NEXT: s_waitcnt lgkmcnt(0) -; GFX8-NEXT: v_add_f64 v[3:4], v[1:2], 4.0 -; GFX8-NEXT: ds_cmpst_rtn_b64 v[3:4], v0, v[1:2], v[3:4] offset:65528 +; GFX8-NEXT: v_mov_b32_e32 v4, v2 +; GFX8-NEXT: v_mov_b32_e32 v3, v1 +; GFX8-NEXT: v_add_f64 v[1:2], v[3:4], 4.0 +; GFX8-NEXT: ds_cmpst_rtn_b64 v[1:2], v0, v[3:4], v[1:2] offset:65528 ; GFX8-NEXT: s_waitcnt lgkmcnt(0) -; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[1:2] -; GFX8-NEXT: v_mov_b32_e32 v1, v3 +; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[1:2], v[3:4] ; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5] -; GFX8-NEXT: v_mov_b32_e32 v2, v4 ; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX8-NEXT: s_cbranch_execnz .LBB7_1 ; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end @@ -1195,13 +1199,13 @@ define void @local_atomic_fadd_noret_f64__offset(ptr addrspace(3) %ptr) nounwind ; GFX7-NEXT: .LBB7_1: ; %atomicrmw.start ; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX7-NEXT: s_waitcnt lgkmcnt(0) -; GFX7-NEXT: v_add_f64 v[3:4], v[1:2], 4.0 -; GFX7-NEXT: ds_cmpst_rtn_b64 v[3:4], v0, v[1:2], v[3:4] offset:65528 +; GFX7-NEXT: v_mov_b32_e32 v4, v2 +; GFX7-NEXT: v_mov_b32_e32 v3, v1 +; GFX7-NEXT: v_add_f64 v[1:2], v[3:4], 4.0 +; GFX7-NEXT: ds_cmpst_rtn_b64 v[1:2], v0, v[3:4], v[1:2] offset:65528 ; GFX7-NEXT: s_waitcnt lgkmcnt(0) -; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[1:2] -; GFX7-NEXT: v_mov_b32_e32 v1, v3 +; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[1:2], v[3:4] ; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5] -; GFX7-NEXT: v_mov_b32_e32 v2, v4 ; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX7-NEXT: s_cbranch_execnz .LBB7_1 ; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end @@ -1218,13 +1222,13 @@ define void 
@local_atomic_fadd_noret_f64__offset(ptr addrspace(3) %ptr) nounwind ; GFX6-NEXT: .LBB7_1: ; %atomicrmw.start ; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX6-NEXT: s_waitcnt lgkmcnt(0) -; GFX6-NEXT: v_add_f64 v[3:4], v[0:1], 4.0 -; GFX6-NEXT: ds_cmpst_rtn_b64 v[3:4], v2, v[0:1], v[3:4] +; GFX6-NEXT: v_mov_b32_e32 v4, v1 +; GFX6-NEXT: v_mov_b32_e32 v3, v0 +; GFX6-NEXT: v_add_f64 v[0:1], v[3:4], 4.0 +; GFX6-NEXT: ds_cmpst_rtn_b64 v[0:1], v2, v[3:4], v[0:1] ; GFX6-NEXT: s_waitcnt lgkmcnt(0) -; GFX6-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[0:1] -; GFX6-NEXT: v_mov_b32_e32 v0, v3 +; GFX6-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[3:4] ; GFX6-NEXT: s_or_b64 s[4:5], vcc, s[4:5] -; GFX6-NEXT: v_mov_b32_e32 v1, v4 ; GFX6-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX6-NEXT: s_cbranch_execnz .LBB7_1 ; GFX6-NEXT: ; %bb.2: ; %atomicrmw.end @@ -2032,27 +2036,27 @@ define void @local_atomic_fadd_noret_f16(ptr addrspace(3) %ptr) nounwind { ; GFX12-TRUE16-NEXT: v_and_b32_e32 v1, -4, v0 ; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v0, 3, v0 ; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0 -; GFX12-TRUE16-NEXT: ds_load_b32 v2, v1 -; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff +; GFX12-TRUE16-NEXT: ds_load_b32 v3, v1 +; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v2, v0, 0xffff ; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, 24, v0 ; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) -; GFX12-TRUE16-NEXT: v_not_b32_e32 v3, v3 +; GFX12-TRUE16-NEXT: v_not_b32_e32 v2, v2 ; GFX12-TRUE16-NEXT: .LBB10_1: ; %atomicrmw.start ; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0 -; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2) -; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v4, v0, v2 -; GFX12-TRUE16-NEXT: v_mov_b16_e32 v4.h, 0 -; GFX12-TRUE16-NEXT: v_add_f16_e32 v4.l, 4.0, v4.l +; GFX12-TRUE16-NEXT: v_mov_b32_e32 v4, v3 +; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2) +; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v0, v4 +; GFX12-TRUE16-NEXT: v_mov_b16_e32 v3.h, 0 +; GFX12-TRUE16-NEXT: v_add_f16_e32 v3.l, 4.0, v3.l ; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v4, v0, v4 -; GFX12-TRUE16-NEXT: v_and_or_b32 v4, v2, v3, v4 +; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v0, v3 +; GFX12-TRUE16-NEXT: v_and_or_b32 v3, v4, v2, v3 ; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0 -; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v1, v4, v2 +; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v1, v3, v4 ; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0 ; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SE -; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2 -; GFX12-TRUE16-NEXT: v_mov_b32_e32 v2, v4 +; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4 ; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe ; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0 ; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe @@ -2073,28 +2077,28 @@ define void @local_atomic_fadd_noret_f16(ptr addrspace(3) %ptr) nounwind { ; GFX12-FAKE16-NEXT: v_and_b32_e32 v1, -4, v0 ; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v0, 3, v0 ; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0 -; GFX12-FAKE16-NEXT: ds_load_b32 v2, v1 -; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff +; GFX12-FAKE16-NEXT: ds_load_b32 v3, v1 +; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v2, v0, 0xffff ; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, 24, v0 ; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) -; GFX12-FAKE16-NEXT: v_not_b32_e32 v3, v3 +; GFX12-FAKE16-NEXT: v_not_b32_e32 v2, v2 ; GFX12-FAKE16-NEXT: 
.LBB10_1: ; %atomicrmw.start ; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0 -; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v4, v0, v2 -; GFX12-FAKE16-NEXT: v_add_f16_e32 v4, 4.0, v4 +; GFX12-FAKE16-NEXT: v_mov_b32_e32 v4, v3 +; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v0, v4 +; GFX12-FAKE16-NEXT: v_add_f16_e32 v3, 4.0, v3 ; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX12-FAKE16-NEXT: v_and_b32_e32 v4, 0xffff, v4 -; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, v0, v4 +; GFX12-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3 +; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v0, v3 ; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX12-FAKE16-NEXT: v_and_or_b32 v4, v2, v3, v4 +; GFX12-FAKE16-NEXT: v_and_or_b32 v3, v4, v2, v3 ; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0 -; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v1, v4, v2 +; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v3, v1, v3, v4 ; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0 ; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SE -; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2 -; GFX12-FAKE16-NEXT: v_mov_b32_e32 v2, v4 +; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4 ; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe ; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0 ; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe @@ -2119,15 +2123,15 @@ define void @local_atomic_fadd_noret_f16(ptr addrspace(3) %ptr) nounwind { ; GFX942-NEXT: .LBB10_1: ; %atomicrmw.start ; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX942-NEXT: s_waitcnt lgkmcnt(0) -; GFX942-NEXT: v_lshrrev_b32_e32 v4, v0, v3 -; GFX942-NEXT: v_add_f16_e32 v4, 4.0, v4 -; GFX942-NEXT: v_lshlrev_b32_e32 v4, v0, v4 -; GFX942-NEXT: v_and_or_b32 v4, v3, v2, v4 -; GFX942-NEXT: ds_cmpst_rtn_b32 v4, v1, v3, v4 +; GFX942-NEXT: v_mov_b32_e32 v4, v3 +; GFX942-NEXT: v_lshrrev_b32_e32 v3, v0, v4 +; GFX942-NEXT: v_add_f16_e32 v3, 4.0, v3 +; GFX942-NEXT: v_lshlrev_b32_e32 v3, v0, v3 +; GFX942-NEXT: v_and_or_b32 v3, v4, v2, v3 +; GFX942-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3 ; GFX942-NEXT: s_waitcnt lgkmcnt(0) -; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3 +; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4 ; GFX942-NEXT: s_or_b64 s[0:1], vcc, s[0:1] -; GFX942-NEXT: v_mov_b32_e32 v3, v4 ; GFX942-NEXT: s_andn2_b64 exec, exec, s[0:1] ; GFX942-NEXT: s_cbranch_execnz .LBB10_1 ; GFX942-NEXT: ; %bb.2: ; %atomicrmw.end @@ -2140,27 +2144,27 @@ define void @local_atomic_fadd_noret_f16(ptr addrspace(3) %ptr) nounwind { ; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, -4, v0 ; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v0, 3, v0 ; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0 -; GFX11-TRUE16-NEXT: ds_load_b32 v2, v1 -; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff +; GFX11-TRUE16-NEXT: ds_load_b32 v3, v1 +; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v2, v0, 0xffff ; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 24, v0 ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) -; GFX11-TRUE16-NEXT: v_not_b32_e32 v3, v3 +; GFX11-TRUE16-NEXT: v_not_b32_e32 v2, v2 ; GFX11-TRUE16-NEXT: .LBB10_1: ; %atomicrmw.start ; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0) -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2) -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v4, v0, v2 -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, 0 -; GFX11-TRUE16-NEXT: v_add_f16_e32 v4.l, 
4.0, v4.l +; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v3 +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2) +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v0, v4 +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, 0 +; GFX11-TRUE16-NEXT: v_add_f16_e32 v3.l, 4.0, v3.l ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v4, v0, v4 -; GFX11-TRUE16-NEXT: v_and_or_b32 v4, v2, v3, v4 +; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v0, v3 +; GFX11-TRUE16-NEXT: v_and_or_b32 v3, v4, v2, v3 ; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0 -; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v1, v4, v2 +; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v1, v3, v4 ; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0) ; GFX11-TRUE16-NEXT: buffer_gl0_inv -; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2 -; GFX11-TRUE16-NEXT: v_mov_b32_e32 v2, v4 +; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4 ; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0 ; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0 @@ -2175,28 +2179,28 @@ define void @local_atomic_fadd_noret_f16(ptr addrspace(3) %ptr) nounwind { ; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, -4, v0 ; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v0, 3, v0 ; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0 -; GFX11-FAKE16-NEXT: ds_load_b32 v2, v1 -; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff +; GFX11-FAKE16-NEXT: ds_load_b32 v3, v1 +; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v2, v0, 0xffff ; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 24, v0 ; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) -; GFX11-FAKE16-NEXT: v_not_b32_e32 v3, v3 +; GFX11-FAKE16-NEXT: v_not_b32_e32 v2, v2 ; GFX11-FAKE16-NEXT: .LBB10_1: ; %atomicrmw.start ; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0) -; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, v0, v2 -; GFX11-FAKE16-NEXT: v_add_f16_e32 v4, 4.0, v4 +; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v3 +; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v0, v4 +; GFX11-FAKE16-NEXT: v_add_f16_e32 v3, 4.0, v3 ; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 0xffff, v4 -; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, v0, v4 +; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3 +; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v0, v3 ; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX11-FAKE16-NEXT: v_and_or_b32 v4, v2, v3, v4 +; GFX11-FAKE16-NEXT: v_and_or_b32 v3, v4, v2, v3 ; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0 -; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v1, v4, v2 +; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v3, v1, v3, v4 ; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0) ; GFX11-FAKE16-NEXT: buffer_gl0_inv -; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2 -; GFX11-FAKE16-NEXT: v_mov_b32_e32 v2, v4 +; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4 ; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0 ; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0 @@ -2211,23 +2215,23 @@ define void @local_atomic_fadd_noret_f16(ptr addrspace(3) %ptr) nounwind { ; GFX10-NEXT: v_and_b32_e32 v1, -4, v0 ; GFX10-NEXT: v_lshlrev_b32_e32 v0, 3, v0 ; GFX10-NEXT: s_mov_b32 s4, 0 -; GFX10-NEXT: ds_read_b32 
v2, v1 -; GFX10-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff +; GFX10-NEXT: ds_read_b32 v3, v1 +; GFX10-NEXT: v_lshlrev_b32_e64 v2, v0, 0xffff ; GFX10-NEXT: v_and_b32_e32 v0, 24, v0 -; GFX10-NEXT: v_not_b32_e32 v3, v3 +; GFX10-NEXT: v_not_b32_e32 v2, v2 ; GFX10-NEXT: .LBB10_1: ; %atomicrmw.start ; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX10-NEXT: s_waitcnt lgkmcnt(0) -; GFX10-NEXT: v_lshrrev_b32_e32 v4, v0, v2 -; GFX10-NEXT: v_add_f16_e32 v4, 4.0, v4 -; GFX10-NEXT: v_lshlrev_b32_sdwa v4, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 -; GFX10-NEXT: v_and_or_b32 v4, v2, v3, v4 +; GFX10-NEXT: v_mov_b32_e32 v4, v3 +; GFX10-NEXT: v_lshrrev_b32_e32 v3, v0, v4 +; GFX10-NEXT: v_add_f16_e32 v3, 4.0, v3 +; GFX10-NEXT: v_lshlrev_b32_sdwa v3, v0, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 +; GFX10-NEXT: v_and_or_b32 v3, v4, v2, v3 ; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 -; GFX10-NEXT: ds_cmpst_rtn_b32 v4, v1, v2, v4 +; GFX10-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3 ; GFX10-NEXT: s_waitcnt lgkmcnt(0) ; GFX10-NEXT: buffer_gl0_inv -; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2 -; GFX10-NEXT: v_mov_b32_e32 v2, v4 +; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4 ; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4 ; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4 ; GFX10-NEXT: s_cbranch_execnz .LBB10_1 @@ -2249,15 +2253,15 @@ define void @local_atomic_fadd_noret_f16(ptr addrspace(3) %ptr) nounwind { ; GFX90A-NEXT: .LBB10_1: ; %atomicrmw.start ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX90A-NEXT: s_waitcnt lgkmcnt(0) -; GFX90A-NEXT: v_lshrrev_b32_e32 v4, v0, v3 -; GFX90A-NEXT: v_add_f16_e32 v4, 4.0, v4 -; GFX90A-NEXT: v_lshlrev_b32_e32 v4, v0, v4 -; GFX90A-NEXT: v_and_or_b32 v4, v3, v2, v4 -; GFX90A-NEXT: ds_cmpst_rtn_b32 v4, v1, v3, v4 +; GFX90A-NEXT: v_mov_b32_e32 v4, v3 +; GFX90A-NEXT: v_lshrrev_b32_e32 v3, v0, v4 +; GFX90A-NEXT: v_add_f16_e32 v3, 4.0, v3 +; GFX90A-NEXT: v_lshlrev_b32_e32 v3, v0, v3 +; GFX90A-NEXT: v_and_or_b32 v3, v4, v2, v3 +; GFX90A-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3 ; GFX90A-NEXT: s_waitcnt lgkmcnt(0) -; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3 +; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4 ; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5] -; GFX90A-NEXT: v_mov_b32_e32 v3, v4 ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX90A-NEXT: s_cbranch_execnz .LBB10_1 ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end @@ -2278,15 +2282,15 @@ define void @local_atomic_fadd_noret_f16(ptr addrspace(3) %ptr) nounwind { ; GFX908-NEXT: .LBB10_1: ; %atomicrmw.start ; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX908-NEXT: s_waitcnt lgkmcnt(0) -; GFX908-NEXT: v_lshrrev_b32_e32 v4, v0, v3 -; GFX908-NEXT: v_add_f16_e32 v4, 4.0, v4 -; GFX908-NEXT: v_lshlrev_b32_e32 v4, v0, v4 -; GFX908-NEXT: v_and_or_b32 v4, v3, v2, v4 -; GFX908-NEXT: ds_cmpst_rtn_b32 v4, v1, v3, v4 +; GFX908-NEXT: v_mov_b32_e32 v4, v3 +; GFX908-NEXT: v_lshrrev_b32_e32 v3, v0, v4 +; GFX908-NEXT: v_add_f16_e32 v3, 4.0, v3 +; GFX908-NEXT: v_lshlrev_b32_e32 v3, v0, v3 +; GFX908-NEXT: v_and_or_b32 v3, v4, v2, v3 +; GFX908-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3 ; GFX908-NEXT: s_waitcnt lgkmcnt(0) -; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3 +; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4 ; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5] -; GFX908-NEXT: v_mov_b32_e32 v3, v4 ; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX908-NEXT: s_cbranch_execnz .LBB10_1 ; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end @@ -2308,16 +2312,16 @@ define void @local_atomic_fadd_noret_f16(ptr addrspace(3) %ptr) nounwind { ; 
GFX8-NEXT: .LBB10_1: ; %atomicrmw.start ; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX8-NEXT: s_waitcnt lgkmcnt(0) -; GFX8-NEXT: v_lshrrev_b32_e32 v4, v0, v3 -; GFX8-NEXT: v_add_f16_e32 v4, 4.0, v4 -; GFX8-NEXT: v_and_b32_e32 v5, v3, v2 -; GFX8-NEXT: v_lshlrev_b32_e32 v4, v0, v4 -; GFX8-NEXT: v_or_b32_e32 v4, v5, v4 -; GFX8-NEXT: ds_cmpst_rtn_b32 v4, v1, v3, v4 +; GFX8-NEXT: v_mov_b32_e32 v4, v3 +; GFX8-NEXT: v_lshrrev_b32_e32 v3, v0, v4 +; GFX8-NEXT: v_add_f16_e32 v3, 4.0, v3 +; GFX8-NEXT: v_and_b32_e32 v5, v4, v2 +; GFX8-NEXT: v_lshlrev_b32_e32 v3, v0, v3 +; GFX8-NEXT: v_or_b32_e32 v3, v5, v3 +; GFX8-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3 ; GFX8-NEXT: s_waitcnt lgkmcnt(0) -; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3 +; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4 ; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5] -; GFX8-NEXT: v_mov_b32_e32 v3, v4 ; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX8-NEXT: s_cbranch_execnz .LBB10_1 ; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end @@ -2338,18 +2342,18 @@ define void @local_atomic_fadd_noret_f16(ptr addrspace(3) %ptr) nounwind { ; GFX7-NEXT: .LBB10_1: ; %atomicrmw.start ; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX7-NEXT: s_waitcnt lgkmcnt(0) -; GFX7-NEXT: v_lshrrev_b32_e32 v4, v0, v3 -; GFX7-NEXT: v_cvt_f32_f16_e32 v4, v4 -; GFX7-NEXT: v_and_b32_e32 v5, v3, v2 -; GFX7-NEXT: v_add_f32_e32 v4, 4.0, v4 -; GFX7-NEXT: v_cvt_f16_f32_e32 v4, v4 -; GFX7-NEXT: v_lshlrev_b32_e32 v4, v0, v4 -; GFX7-NEXT: v_or_b32_e32 v4, v5, v4 -; GFX7-NEXT: ds_cmpst_rtn_b32 v4, v1, v3, v4 +; GFX7-NEXT: v_mov_b32_e32 v4, v3 +; GFX7-NEXT: v_lshrrev_b32_e32 v3, v0, v4 +; GFX7-NEXT: v_cvt_f32_f16_e32 v3, v3 +; GFX7-NEXT: v_and_b32_e32 v5, v4, v2 +; GFX7-NEXT: v_add_f32_e32 v3, 4.0, v3 +; GFX7-NEXT: v_cvt_f16_f32_e32 v3, v3 +; GFX7-NEXT: v_lshlrev_b32_e32 v3, v0, v3 +; GFX7-NEXT: v_or_b32_e32 v3, v5, v3 +; GFX7-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3 ; GFX7-NEXT: s_waitcnt lgkmcnt(0) -; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3 +; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4 ; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5] -; GFX7-NEXT: v_mov_b32_e32 v3, v4 ; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX7-NEXT: s_cbranch_execnz .LBB10_1 ; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end @@ -2370,18 +2374,18 @@ define void @local_atomic_fadd_noret_f16(ptr addrspace(3) %ptr) nounwind { ; GFX6-NEXT: .LBB10_1: ; %atomicrmw.start ; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX6-NEXT: s_waitcnt lgkmcnt(0) -; GFX6-NEXT: v_lshrrev_b32_e32 v4, v0, v3 -; GFX6-NEXT: v_cvt_f32_f16_e32 v4, v4 -; GFX6-NEXT: v_and_b32_e32 v5, v3, v2 -; GFX6-NEXT: v_add_f32_e32 v4, 4.0, v4 -; GFX6-NEXT: v_cvt_f16_f32_e32 v4, v4 -; GFX6-NEXT: v_lshlrev_b32_e32 v4, v0, v4 -; GFX6-NEXT: v_or_b32_e32 v4, v5, v4 -; GFX6-NEXT: ds_cmpst_rtn_b32 v4, v1, v3, v4 +; GFX6-NEXT: v_mov_b32_e32 v4, v3 +; GFX6-NEXT: v_lshrrev_b32_e32 v3, v0, v4 +; GFX6-NEXT: v_cvt_f32_f16_e32 v3, v3 +; GFX6-NEXT: v_and_b32_e32 v5, v4, v2 +; GFX6-NEXT: v_add_f32_e32 v3, 4.0, v3 +; GFX6-NEXT: v_cvt_f16_f32_e32 v3, v3 +; GFX6-NEXT: v_lshlrev_b32_e32 v3, v0, v3 +; GFX6-NEXT: v_or_b32_e32 v3, v5, v3 +; GFX6-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3 ; GFX6-NEXT: s_waitcnt lgkmcnt(0) -; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3 +; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4 ; GFX6-NEXT: s_or_b64 s[4:5], vcc, s[4:5] -; GFX6-NEXT: v_mov_b32_e32 v3, v4 ; GFX6-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX6-NEXT: s_cbranch_execnz .LBB10_1 ; GFX6-NEXT: ; %bb.2: ; %atomicrmw.end @@ -2412,19 +2416,19 @@ define void @local_atomic_fadd_noret_f16__offset(ptr addrspace(3) %ptr) nounwind ; 
GFX12-TRUE16-NEXT: .LBB11_1: ; %atomicrmw.start ; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0 -; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v4, v1, v3 -; GFX12-TRUE16-NEXT: v_mov_b16_e32 v4.h, 0 -; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX12-TRUE16-NEXT: v_add_f16_e32 v4.l, 4.0, v4.l -; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v4, v1, v4 -; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX12-TRUE16-NEXT: v_and_or_b32 v4, v3, v2, v4 +; GFX12-TRUE16-NEXT: v_mov_b32_e32 v4, v3 +; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2) +; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v1, v4 +; GFX12-TRUE16-NEXT: v_mov_b16_e32 v3.h, 0 +; GFX12-TRUE16-NEXT: v_add_f16_e32 v3.l, 4.0, v3.l +; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v1, v3 +; GFX12-TRUE16-NEXT: v_and_or_b32 v3, v4, v2, v3 ; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0 -; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3 +; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4 ; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0 ; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SE -; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3 -; GFX12-TRUE16-NEXT: v_mov_b32_e32 v3, v4 +; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4 ; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe ; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0 ; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe @@ -2455,19 +2459,20 @@ define void @local_atomic_fadd_noret_f16__offset(ptr addrspace(3) %ptr) nounwind ; GFX12-FAKE16-NEXT: .LBB11_1: ; %atomicrmw.start ; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0 -; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v4, v1, v3 +; GFX12-FAKE16-NEXT: v_mov_b32_e32 v4, v3 ; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX12-FAKE16-NEXT: v_add_f16_e32 v4, 4.0, v4 -; GFX12-FAKE16-NEXT: v_and_b32_e32 v4, 0xffff, v4 +; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v1, v4 +; GFX12-FAKE16-NEXT: v_add_f16_e32 v3, 4.0, v3 ; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, v1, v4 -; GFX12-FAKE16-NEXT: v_and_or_b32 v4, v3, v2, v4 +; GFX12-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3 +; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v1, v3 +; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX12-FAKE16-NEXT: v_and_or_b32 v3, v4, v2, v3 ; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0 -; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3 +; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4 ; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0 ; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SE -; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3 -; GFX12-FAKE16-NEXT: v_mov_b32_e32 v3, v4 +; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4 ; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe ; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0 ; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe @@ -2493,15 +2498,15 @@ define void @local_atomic_fadd_noret_f16__offset(ptr addrspace(3) %ptr) nounwind ; GFX942-NEXT: .LBB11_1: ; %atomicrmw.start ; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX942-NEXT: s_waitcnt lgkmcnt(0) -; GFX942-NEXT: v_lshrrev_b32_e32 v4, v1, v3 -; GFX942-NEXT: v_add_f16_e32 v4, 4.0, v4 -; GFX942-NEXT: v_lshlrev_b32_e32 v4, v1, v4 -; GFX942-NEXT: v_and_or_b32 v4, v3, v2, v4 -; GFX942-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4 +; GFX942-NEXT: 
v_mov_b32_e32 v4, v3 +; GFX942-NEXT: v_lshrrev_b32_e32 v3, v1, v4 +; GFX942-NEXT: v_add_f16_e32 v3, 4.0, v3 +; GFX942-NEXT: v_lshlrev_b32_e32 v3, v1, v3 +; GFX942-NEXT: v_and_or_b32 v3, v4, v2, v3 +; GFX942-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3 ; GFX942-NEXT: s_waitcnt lgkmcnt(0) -; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3 +; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4 ; GFX942-NEXT: s_or_b64 s[0:1], vcc, s[0:1] -; GFX942-NEXT: v_mov_b32_e32 v3, v4 ; GFX942-NEXT: s_andn2_b64 exec, exec, s[0:1] ; GFX942-NEXT: s_cbranch_execnz .LBB11_1 ; GFX942-NEXT: ; %bb.2: ; %atomicrmw.end @@ -2524,19 +2529,19 @@ define void @local_atomic_fadd_noret_f16__offset(ptr addrspace(3) %ptr) nounwind ; GFX11-TRUE16-NEXT: .LBB11_1: ; %atomicrmw.start ; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0) -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v4, v1, v3 -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, 0 -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-TRUE16-NEXT: v_add_f16_e32 v4.l, 4.0, v4.l -; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v4, v1, v4 -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX11-TRUE16-NEXT: v_and_or_b32 v4, v3, v2, v4 +; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v3 +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2) +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v1, v4 +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, 0 +; GFX11-TRUE16-NEXT: v_add_f16_e32 v3.l, 4.0, v3.l +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v1, v3 +; GFX11-TRUE16-NEXT: v_and_or_b32 v3, v4, v2, v3 ; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0 -; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3 +; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4 ; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0) ; GFX11-TRUE16-NEXT: buffer_gl0_inv -; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3 -; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v4 +; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4 ; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0 ; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0 @@ -2561,19 +2566,20 @@ define void @local_atomic_fadd_noret_f16__offset(ptr addrspace(3) %ptr) nounwind ; GFX11-FAKE16-NEXT: .LBB11_1: ; %atomicrmw.start ; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0) -; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, v1, v3 +; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v3 ; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-FAKE16-NEXT: v_add_f16_e32 v4, 4.0, v4 -; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 0xffff, v4 +; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v1, v4 +; GFX11-FAKE16-NEXT: v_add_f16_e32 v3, 4.0, v3 ; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, v1, v4 -; GFX11-FAKE16-NEXT: v_and_or_b32 v4, v3, v2, v4 +; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3 +; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v1, v3 +; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-FAKE16-NEXT: v_and_or_b32 v3, v4, v2, v3 ; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0 -; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3 +; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4 ; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0) ; GFX11-FAKE16-NEXT: buffer_gl0_inv -; GFX11-FAKE16-NEXT: 
v_cmp_eq_u32_e32 vcc_lo, v4, v3 -; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v4 +; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4 ; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0 ; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0 @@ -2596,16 +2602,16 @@ define void @local_atomic_fadd_noret_f16__offset(ptr addrspace(3) %ptr) nounwind ; GFX10-NEXT: .LBB11_1: ; %atomicrmw.start ; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX10-NEXT: s_waitcnt lgkmcnt(0) -; GFX10-NEXT: v_lshrrev_b32_e32 v4, v1, v3 -; GFX10-NEXT: v_add_f16_e32 v4, 4.0, v4 -; GFX10-NEXT: v_lshlrev_b32_sdwa v4, v1, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 -; GFX10-NEXT: v_and_or_b32 v4, v3, v2, v4 +; GFX10-NEXT: v_mov_b32_e32 v4, v3 +; GFX10-NEXT: v_lshrrev_b32_e32 v3, v1, v4 +; GFX10-NEXT: v_add_f16_e32 v3, 4.0, v3 +; GFX10-NEXT: v_lshlrev_b32_sdwa v3, v1, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 +; GFX10-NEXT: v_and_or_b32 v3, v4, v2, v3 ; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 -; GFX10-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4 +; GFX10-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3 ; GFX10-NEXT: s_waitcnt lgkmcnt(0) ; GFX10-NEXT: buffer_gl0_inv -; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3 -; GFX10-NEXT: v_mov_b32_e32 v3, v4 +; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4 ; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4 ; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4 ; GFX10-NEXT: s_cbranch_execnz .LBB11_1 @@ -2628,15 +2634,15 @@ define void @local_atomic_fadd_noret_f16__offset(ptr addrspace(3) %ptr) nounwind ; GFX90A-NEXT: .LBB11_1: ; %atomicrmw.start ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX90A-NEXT: s_waitcnt lgkmcnt(0) -; GFX90A-NEXT: v_lshrrev_b32_e32 v4, v1, v3 -; GFX90A-NEXT: v_add_f16_e32 v4, 4.0, v4 -; GFX90A-NEXT: v_lshlrev_b32_e32 v4, v1, v4 -; GFX90A-NEXT: v_and_or_b32 v4, v3, v2, v4 -; GFX90A-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4 +; GFX90A-NEXT: v_mov_b32_e32 v4, v3 +; GFX90A-NEXT: v_lshrrev_b32_e32 v3, v1, v4 +; GFX90A-NEXT: v_add_f16_e32 v3, 4.0, v3 +; GFX90A-NEXT: v_lshlrev_b32_e32 v3, v1, v3 +; GFX90A-NEXT: v_and_or_b32 v3, v4, v2, v3 +; GFX90A-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3 ; GFX90A-NEXT: s_waitcnt lgkmcnt(0) -; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3 +; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4 ; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5] -; GFX90A-NEXT: v_mov_b32_e32 v3, v4 ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX90A-NEXT: s_cbranch_execnz .LBB11_1 ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end @@ -2658,15 +2664,15 @@ define void @local_atomic_fadd_noret_f16__offset(ptr addrspace(3) %ptr) nounwind ; GFX908-NEXT: .LBB11_1: ; %atomicrmw.start ; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX908-NEXT: s_waitcnt lgkmcnt(0) -; GFX908-NEXT: v_lshrrev_b32_e32 v4, v1, v3 -; GFX908-NEXT: v_add_f16_e32 v4, 4.0, v4 -; GFX908-NEXT: v_lshlrev_b32_e32 v4, v1, v4 -; GFX908-NEXT: v_and_or_b32 v4, v3, v2, v4 -; GFX908-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4 +; GFX908-NEXT: v_mov_b32_e32 v4, v3 +; GFX908-NEXT: v_lshrrev_b32_e32 v3, v1, v4 +; GFX908-NEXT: v_add_f16_e32 v3, 4.0, v3 +; GFX908-NEXT: v_lshlrev_b32_e32 v3, v1, v3 +; GFX908-NEXT: v_and_or_b32 v3, v4, v2, v3 +; GFX908-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3 ; GFX908-NEXT: s_waitcnt lgkmcnt(0) -; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3 +; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4 ; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5] -; GFX908-NEXT: v_mov_b32_e32 v3, v4 ; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX908-NEXT: s_cbranch_execnz 
.LBB11_1 ; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end @@ -2689,16 +2695,16 @@ define void @local_atomic_fadd_noret_f16__offset(ptr addrspace(3) %ptr) nounwind ; GFX8-NEXT: .LBB11_1: ; %atomicrmw.start ; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX8-NEXT: s_waitcnt lgkmcnt(0) -; GFX8-NEXT: v_lshrrev_b32_e32 v4, v1, v3 -; GFX8-NEXT: v_add_f16_e32 v4, 4.0, v4 -; GFX8-NEXT: v_and_b32_e32 v5, v3, v2 -; GFX8-NEXT: v_lshlrev_b32_e32 v4, v1, v4 -; GFX8-NEXT: v_or_b32_e32 v4, v5, v4 -; GFX8-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4 +; GFX8-NEXT: v_mov_b32_e32 v4, v3 +; GFX8-NEXT: v_lshrrev_b32_e32 v3, v1, v4 +; GFX8-NEXT: v_add_f16_e32 v3, 4.0, v3 +; GFX8-NEXT: v_and_b32_e32 v5, v4, v2 +; GFX8-NEXT: v_lshlrev_b32_e32 v3, v1, v3 +; GFX8-NEXT: v_or_b32_e32 v3, v5, v3 +; GFX8-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3 ; GFX8-NEXT: s_waitcnt lgkmcnt(0) -; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3 +; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4 ; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5] -; GFX8-NEXT: v_mov_b32_e32 v3, v4 ; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX8-NEXT: s_cbranch_execnz .LBB11_1 ; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end @@ -2720,18 +2726,18 @@ define void @local_atomic_fadd_noret_f16__offset(ptr addrspace(3) %ptr) nounwind ; GFX7-NEXT: .LBB11_1: ; %atomicrmw.start ; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX7-NEXT: s_waitcnt lgkmcnt(0) -; GFX7-NEXT: v_lshrrev_b32_e32 v4, v1, v3 -; GFX7-NEXT: v_cvt_f32_f16_e32 v4, v4 -; GFX7-NEXT: v_and_b32_e32 v5, v3, v2 -; GFX7-NEXT: v_add_f32_e32 v4, 4.0, v4 -; GFX7-NEXT: v_cvt_f16_f32_e32 v4, v4 -; GFX7-NEXT: v_lshlrev_b32_e32 v4, v1, v4 -; GFX7-NEXT: v_or_b32_e32 v4, v5, v4 -; GFX7-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4 +; GFX7-NEXT: v_mov_b32_e32 v4, v3 +; GFX7-NEXT: v_lshrrev_b32_e32 v3, v1, v4 +; GFX7-NEXT: v_cvt_f32_f16_e32 v3, v3 +; GFX7-NEXT: v_and_b32_e32 v5, v4, v2 +; GFX7-NEXT: v_add_f32_e32 v3, 4.0, v3 +; GFX7-NEXT: v_cvt_f16_f32_e32 v3, v3 +; GFX7-NEXT: v_lshlrev_b32_e32 v3, v1, v3 +; GFX7-NEXT: v_or_b32_e32 v3, v5, v3 +; GFX7-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3 ; GFX7-NEXT: s_waitcnt lgkmcnt(0) -; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3 +; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4 ; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5] -; GFX7-NEXT: v_mov_b32_e32 v3, v4 ; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX7-NEXT: s_cbranch_execnz .LBB11_1 ; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end @@ -2753,18 +2759,18 @@ define void @local_atomic_fadd_noret_f16__offset(ptr addrspace(3) %ptr) nounwind ; GFX6-NEXT: .LBB11_1: ; %atomicrmw.start ; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX6-NEXT: s_waitcnt lgkmcnt(0) -; GFX6-NEXT: v_lshrrev_b32_e32 v4, v1, v3 -; GFX6-NEXT: v_cvt_f32_f16_e32 v4, v4 -; GFX6-NEXT: v_and_b32_e32 v5, v3, v2 -; GFX6-NEXT: v_add_f32_e32 v4, 4.0, v4 -; GFX6-NEXT: v_cvt_f16_f32_e32 v4, v4 -; GFX6-NEXT: v_lshlrev_b32_e32 v4, v1, v4 -; GFX6-NEXT: v_or_b32_e32 v4, v5, v4 -; GFX6-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4 +; GFX6-NEXT: v_mov_b32_e32 v4, v3 +; GFX6-NEXT: v_lshrrev_b32_e32 v3, v1, v4 +; GFX6-NEXT: v_cvt_f32_f16_e32 v3, v3 +; GFX6-NEXT: v_and_b32_e32 v5, v4, v2 +; GFX6-NEXT: v_add_f32_e32 v3, 4.0, v3 +; GFX6-NEXT: v_cvt_f16_f32_e32 v3, v3 +; GFX6-NEXT: v_lshlrev_b32_e32 v3, v1, v3 +; GFX6-NEXT: v_or_b32_e32 v3, v5, v3 +; GFX6-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3 ; GFX6-NEXT: s_waitcnt lgkmcnt(0) -; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3 +; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4 ; GFX6-NEXT: s_or_b64 s[4:5], vcc, s[4:5] -; GFX6-NEXT: v_mov_b32_e32 v3, v4 ; GFX6-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX6-NEXT: 
s_cbranch_execnz .LBB11_1 ; GFX6-NEXT: ; %bb.2: ; %atomicrmw.end @@ -3086,16 +3092,16 @@ define void @local_atomic_fadd_noret_f16__offset__align4(ptr addrspace(3) %ptr) ; GFX12-TRUE16-NEXT: .LBB13_1: ; %atomicrmw.start ; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0 -; GFX12-TRUE16-NEXT: v_add_f16_e32 v2.l, 4.0, v1.l -; GFX12-TRUE16-NEXT: v_mov_b16_e32 v2.h, 0 -; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX12-TRUE16-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v2 +; GFX12-TRUE16-NEXT: v_mov_b32_e32 v2, v1 +; GFX12-TRUE16-NEXT: v_mov_b16_e32 v1.h, 0 +; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX12-TRUE16-NEXT: v_add_f16_e32 v1.l, 4.0, v2.l +; GFX12-TRUE16-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v1 ; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0 -; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v1 offset:65534 +; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:65534 ; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0 ; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SE -; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1 -; GFX12-TRUE16-NEXT: v_mov_b32_e32 v1, v2 +; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2 ; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe ; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0 ; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe @@ -3118,16 +3124,17 @@ define void @local_atomic_fadd_noret_f16__offset__align4(ptr addrspace(3) %ptr) ; GFX12-FAKE16-NEXT: .LBB13_1: ; %atomicrmw.start ; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0 -; GFX12-FAKE16-NEXT: v_add_f16_e32 v2, 4.0, v1 +; GFX12-FAKE16-NEXT: v_mov_b32_e32 v2, v1 ; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX12-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2 -; GFX12-FAKE16-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v2 +; GFX12-FAKE16-NEXT: v_add_f16_e32 v1, 4.0, v2 +; GFX12-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX12-FAKE16-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v1 ; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0 -; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v1 offset:65534 +; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:65534 ; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0 ; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SE -; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1 -; GFX12-FAKE16-NEXT: v_mov_b32_e32 v1, v2 +; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2 ; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe ; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0 ; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe @@ -3147,13 +3154,13 @@ define void @local_atomic_fadd_noret_f16__offset__align4(ptr addrspace(3) %ptr) ; GFX942-NEXT: .LBB13_1: ; %atomicrmw.start ; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX942-NEXT: s_waitcnt lgkmcnt(0) -; GFX942-NEXT: v_add_f16_e32 v2, 4.0, v1 -; GFX942-NEXT: v_and_or_b32 v2, v1, s2, v2 -; GFX942-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 offset:65534 +; GFX942-NEXT: v_mov_b32_e32 v2, v1 +; GFX942-NEXT: v_add_f16_e32 v1, 4.0, v2 +; GFX942-NEXT: v_and_or_b32 v1, v2, s2, v1 +; GFX942-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 offset:65534 ; GFX942-NEXT: s_waitcnt lgkmcnt(0) -; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1 +; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2 ; GFX942-NEXT: s_or_b64 s[0:1], vcc, s[0:1] -; GFX942-NEXT: v_mov_b32_e32 v1, v2 ; GFX942-NEXT: s_andn2_b64 exec, exec, s[0:1] ; GFX942-NEXT: s_cbranch_execnz .LBB13_1 ; GFX942-NEXT: ; %bb.2: ; %atomicrmw.end @@ 
-3168,16 +3175,16 @@ define void @local_atomic_fadd_noret_f16__offset__align4(ptr addrspace(3) %ptr) ; GFX11-TRUE16-NEXT: .LBB13_1: ; %atomicrmw.start ; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0) -; GFX11-TRUE16-NEXT: v_add_f16_e32 v2.l, 4.0, v1.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, 0 -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX11-TRUE16-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v2 +; GFX11-TRUE16-NEXT: v_mov_b32_e32 v2, v1 +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, 0 +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX11-TRUE16-NEXT: v_add_f16_e32 v1.l, 4.0, v2.l +; GFX11-TRUE16-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v1 ; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0 -; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v1 offset:65534 +; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:65534 ; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0) ; GFX11-TRUE16-NEXT: buffer_gl0_inv -; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1 -; GFX11-TRUE16-NEXT: v_mov_b32_e32 v1, v2 +; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2 ; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0 ; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0 @@ -3194,16 +3201,17 @@ define void @local_atomic_fadd_noret_f16__offset__align4(ptr addrspace(3) %ptr) ; GFX11-FAKE16-NEXT: .LBB13_1: ; %atomicrmw.start ; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0) -; GFX11-FAKE16-NEXT: v_add_f16_e32 v2, 4.0, v1 +; GFX11-FAKE16-NEXT: v_mov_b32_e32 v2, v1 ; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2 -; GFX11-FAKE16-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v2 +; GFX11-FAKE16-NEXT: v_add_f16_e32 v1, 4.0, v2 +; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-FAKE16-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v1 ; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0 -; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v1 offset:65534 +; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:65534 ; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0) ; GFX11-FAKE16-NEXT: buffer_gl0_inv -; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1 -; GFX11-FAKE16-NEXT: v_mov_b32_e32 v1, v2 +; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2 ; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0 ; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0 @@ -3220,15 +3228,15 @@ define void @local_atomic_fadd_noret_f16__offset__align4(ptr addrspace(3) %ptr) ; GFX10-NEXT: .LBB13_1: ; %atomicrmw.start ; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX10-NEXT: s_waitcnt lgkmcnt(0) -; GFX10-NEXT: v_add_f16_e32 v2, 4.0, v1 -; GFX10-NEXT: v_and_b32_e32 v2, 0xffff, v2 -; GFX10-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v2 +; GFX10-NEXT: v_mov_b32_e32 v2, v1 +; GFX10-NEXT: v_add_f16_e32 v1, 4.0, v2 +; GFX10-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; GFX10-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v1 ; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 -; GFX10-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 offset:65534 +; GFX10-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 offset:65534 ; GFX10-NEXT: s_waitcnt lgkmcnt(0) ; GFX10-NEXT: buffer_gl0_inv -; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1 -; GFX10-NEXT: v_mov_b32_e32 v1, v2 +; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2 ; 
GFX10-NEXT: s_or_b32 s4, vcc_lo, s4 ; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4 ; GFX10-NEXT: s_cbranch_execnz .LBB13_1 @@ -3245,13 +3253,13 @@ define void @local_atomic_fadd_noret_f16__offset__align4(ptr addrspace(3) %ptr) ; GFX90A-NEXT: .LBB13_1: ; %atomicrmw.start ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX90A-NEXT: s_waitcnt lgkmcnt(0) -; GFX90A-NEXT: v_add_f16_e32 v2, 4.0, v1 -; GFX90A-NEXT: v_and_or_b32 v2, v1, s6, v2 -; GFX90A-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 offset:65534 +; GFX90A-NEXT: v_mov_b32_e32 v2, v1 +; GFX90A-NEXT: v_add_f16_e32 v1, 4.0, v2 +; GFX90A-NEXT: v_and_or_b32 v1, v2, s6, v1 +; GFX90A-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 offset:65534 ; GFX90A-NEXT: s_waitcnt lgkmcnt(0) -; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1 +; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2 ; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5] -; GFX90A-NEXT: v_mov_b32_e32 v1, v2 ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX90A-NEXT: s_cbranch_execnz .LBB13_1 ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end @@ -3267,13 +3275,13 @@ define void @local_atomic_fadd_noret_f16__offset__align4(ptr addrspace(3) %ptr) ; GFX908-NEXT: .LBB13_1: ; %atomicrmw.start ; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX908-NEXT: s_waitcnt lgkmcnt(0) -; GFX908-NEXT: v_add_f16_e32 v2, 4.0, v1 -; GFX908-NEXT: v_and_or_b32 v2, v1, s6, v2 -; GFX908-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 offset:65534 +; GFX908-NEXT: v_mov_b32_e32 v2, v1 +; GFX908-NEXT: v_add_f16_e32 v1, 4.0, v2 +; GFX908-NEXT: v_and_or_b32 v1, v2, s6, v1 +; GFX908-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 offset:65534 ; GFX908-NEXT: s_waitcnt lgkmcnt(0) -; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1 +; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2 ; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5] -; GFX908-NEXT: v_mov_b32_e32 v1, v2 ; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX908-NEXT: s_cbranch_execnz .LBB13_1 ; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end @@ -3289,14 +3297,14 @@ define void @local_atomic_fadd_noret_f16__offset__align4(ptr addrspace(3) %ptr) ; GFX8-NEXT: .LBB13_1: ; %atomicrmw.start ; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX8-NEXT: s_waitcnt lgkmcnt(0) -; GFX8-NEXT: v_add_f16_e32 v2, 4.0, v1 -; GFX8-NEXT: v_and_b32_e32 v3, 0xffff0000, v1 -; GFX8-NEXT: v_or_b32_e32 v2, v3, v2 -; GFX8-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 offset:65534 +; GFX8-NEXT: v_mov_b32_e32 v2, v1 +; GFX8-NEXT: v_add_f16_e32 v1, 4.0, v2 +; GFX8-NEXT: v_and_b32_e32 v3, 0xffff0000, v2 +; GFX8-NEXT: v_or_b32_e32 v1, v3, v1 +; GFX8-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 offset:65534 ; GFX8-NEXT: s_waitcnt lgkmcnt(0) -; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1 +; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2 ; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5] -; GFX8-NEXT: v_mov_b32_e32 v1, v2 ; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX8-NEXT: s_cbranch_execnz .LBB13_1 ; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end @@ -3312,16 +3320,16 @@ define void @local_atomic_fadd_noret_f16__offset__align4(ptr addrspace(3) %ptr) ; GFX7-NEXT: .LBB13_1: ; %atomicrmw.start ; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX7-NEXT: s_waitcnt lgkmcnt(0) -; GFX7-NEXT: v_cvt_f32_f16_e32 v2, v1 -; GFX7-NEXT: v_and_b32_e32 v3, 0xffff0000, v1 -; GFX7-NEXT: v_add_f32_e32 v2, 4.0, v2 -; GFX7-NEXT: v_cvt_f16_f32_e32 v2, v2 -; GFX7-NEXT: v_or_b32_e32 v2, v3, v2 -; GFX7-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 offset:65534 +; GFX7-NEXT: v_mov_b32_e32 v2, v1 +; GFX7-NEXT: v_cvt_f32_f16_e32 v1, v2 +; GFX7-NEXT: v_and_b32_e32 v3, 0xffff0000, v2 +; GFX7-NEXT: v_add_f32_e32 v1, 4.0, v1 +; GFX7-NEXT: 
v_cvt_f16_f32_e32 v1, v1 +; GFX7-NEXT: v_or_b32_e32 v1, v3, v1 +; GFX7-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 offset:65534 ; GFX7-NEXT: s_waitcnt lgkmcnt(0) -; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1 +; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2 ; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5] -; GFX7-NEXT: v_mov_b32_e32 v1, v2 ; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX7-NEXT: s_cbranch_execnz .LBB13_1 ; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end @@ -3338,16 +3346,16 @@ define void @local_atomic_fadd_noret_f16__offset__align4(ptr addrspace(3) %ptr) ; GFX6-NEXT: .LBB13_1: ; %atomicrmw.start ; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX6-NEXT: s_waitcnt lgkmcnt(0) -; GFX6-NEXT: v_cvt_f32_f16_e32 v2, v1 -; GFX6-NEXT: v_and_b32_e32 v3, 0xffff0000, v1 -; GFX6-NEXT: v_add_f32_e32 v2, 4.0, v2 -; GFX6-NEXT: v_cvt_f16_f32_e32 v2, v2 -; GFX6-NEXT: v_or_b32_e32 v2, v3, v2 -; GFX6-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 +; GFX6-NEXT: v_mov_b32_e32 v2, v1 +; GFX6-NEXT: v_cvt_f32_f16_e32 v1, v2 +; GFX6-NEXT: v_and_b32_e32 v3, 0xffff0000, v2 +; GFX6-NEXT: v_add_f32_e32 v1, 4.0, v1 +; GFX6-NEXT: v_cvt_f16_f32_e32 v1, v1 +; GFX6-NEXT: v_or_b32_e32 v1, v3, v1 +; GFX6-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 ; GFX6-NEXT: s_waitcnt lgkmcnt(0) -; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1 +; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2 ; GFX6-NEXT: s_or_b64 s[4:5], vcc, s[4:5] -; GFX6-NEXT: v_mov_b32_e32 v1, v2 ; GFX6-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX6-NEXT: s_cbranch_execnz .LBB13_1 ; GFX6-NEXT: ; %bb.2: ; %atomicrmw.end @@ -4297,38 +4305,38 @@ define void @local_atomic_fadd_noret_bf16(ptr addrspace(3) %ptr) nounwind { ; GFX12-TRUE16-NEXT: v_and_b32_e32 v1, -4, v0 ; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v0, 3, v0 ; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0 -; GFX12-TRUE16-NEXT: ds_load_b32 v2, v1 -; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff +; GFX12-TRUE16-NEXT: ds_load_b32 v3, v1 +; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v2, v0, 0xffff ; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, 24, v0 ; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) -; GFX12-TRUE16-NEXT: v_not_b32_e32 v3, v3 +; GFX12-TRUE16-NEXT: v_not_b32_e32 v2, v2 ; GFX12-TRUE16-NEXT: .LBB16_1: ; %atomicrmw.start ; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0 -; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v4, v0, v2 -; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 16, v4 +; GFX12-TRUE16-NEXT: v_mov_b32_e32 v4, v3 +; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v0, v4 +; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3 ; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX12-TRUE16-NEXT: v_add_f32_e32 v4, 4.0, v4 -; GFX12-TRUE16-NEXT: v_bfe_u32 v5, v4, 16, 1 -; GFX12-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v4 -; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4 +; GFX12-TRUE16-NEXT: v_add_f32_e32 v3, 4.0, v3 +; GFX12-TRUE16-NEXT: v_bfe_u32 v5, v3, 16, 1 +; GFX12-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v3 +; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3 ; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1) -; GFX12-TRUE16-NEXT: v_add3_u32 v5, v5, v4, 0x7fff +; GFX12-TRUE16-NEXT: v_add3_u32 v5, v5, v3, 0x7fff ; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd -; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo +; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo 
; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.h, 0 ; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.l, v4.h -; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v4, v0, v5 +; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.l, v3.h +; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v0, v5 ; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX12-TRUE16-NEXT: v_and_or_b32 v4, v2, v3, v4 +; GFX12-TRUE16-NEXT: v_and_or_b32 v3, v4, v2, v3 ; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0 -; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v1, v4, v2 +; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v1, v3, v4 ; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0 ; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SE -; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2 -; GFX12-TRUE16-NEXT: v_mov_b32_e32 v2, v4 +; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4 ; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe ; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0 ; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe @@ -4349,37 +4357,37 @@ define void @local_atomic_fadd_noret_bf16(ptr addrspace(3) %ptr) nounwind { ; GFX12-FAKE16-NEXT: v_and_b32_e32 v1, -4, v0 ; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v0, 3, v0 ; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0 -; GFX12-FAKE16-NEXT: ds_load_b32 v2, v1 -; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff +; GFX12-FAKE16-NEXT: ds_load_b32 v3, v1 +; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v2, v0, 0xffff ; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, 24, v0 ; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) -; GFX12-FAKE16-NEXT: v_not_b32_e32 v3, v3 +; GFX12-FAKE16-NEXT: v_not_b32_e32 v2, v2 ; GFX12-FAKE16-NEXT: .LBB16_1: ; %atomicrmw.start ; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0 -; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v4, v0, v2 -; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v4 +; GFX12-FAKE16-NEXT: v_mov_b32_e32 v4, v3 ; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX12-FAKE16-NEXT: v_add_f32_e32 v4, 4.0, v4 -; GFX12-FAKE16-NEXT: v_bfe_u32 v5, v4, 16, 1 -; GFX12-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v4 -; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4 +; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v0, v4 +; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX12-FAKE16-NEXT: v_add_f32_e32 v3, 4.0, v3 +; GFX12-FAKE16-NEXT: v_bfe_u32 v5, v3, 16, 1 +; GFX12-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v3 +; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3 ; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1) -; GFX12-FAKE16-NEXT: v_add3_u32 v5, v5, v4, 0x7fff +; GFX12-FAKE16-NEXT: v_add3_u32 v5, v5, v3, 0x7fff ; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd -; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo +; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo ; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v4 -; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, v0, v4 +; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3 +; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v0, v3 ; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX12-FAKE16-NEXT: v_and_or_b32 v4, v2, v3, v4 +; GFX12-FAKE16-NEXT: v_and_or_b32 v3, v4, v2, v3 ; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0 -; GFX12-FAKE16-NEXT: 
ds_cmpstore_rtn_b32 v4, v1, v4, v2 +; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v3, v1, v3, v4 ; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0 ; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SE -; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2 -; GFX12-FAKE16-NEXT: v_mov_b32_e32 v2, v4 +; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4 ; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe ; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0 ; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe @@ -4405,22 +4413,22 @@ define void @local_atomic_fadd_noret_bf16(ptr addrspace(3) %ptr) nounwind { ; GFX942-NEXT: .LBB16_1: ; %atomicrmw.start ; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX942-NEXT: s_waitcnt lgkmcnt(0) -; GFX942-NEXT: v_lshrrev_b32_sdwa v4, v0, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD +; GFX942-NEXT: v_mov_b32_e32 v4, v3 +; GFX942-NEXT: v_lshrrev_b32_sdwa v3, v0, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; GFX942-NEXT: s_nop 0 -; GFX942-NEXT: v_add_f32_e32 v4, 4.0, v4 -; GFX942-NEXT: v_bfe_u32 v5, v4, 16, 1 -; GFX942-NEXT: v_or_b32_e32 v6, 0x400000, v4 -; GFX942-NEXT: v_add3_u32 v5, v5, v4, s2 -; GFX942-NEXT: v_cmp_u_f32_e32 vcc, v4, v4 +; GFX942-NEXT: v_add_f32_e32 v3, 4.0, v3 +; GFX942-NEXT: v_bfe_u32 v5, v3, 16, 1 +; GFX942-NEXT: v_or_b32_e32 v6, 0x400000, v3 +; GFX942-NEXT: v_add3_u32 v5, v5, v3, s2 +; GFX942-NEXT: v_cmp_u_f32_e32 vcc, v3, v3 ; GFX942-NEXT: s_nop 1 -; GFX942-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc -; GFX942-NEXT: v_lshlrev_b32_sdwa v4, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1 -; GFX942-NEXT: v_and_or_b32 v4, v3, v2, v4 -; GFX942-NEXT: ds_cmpst_rtn_b32 v4, v1, v3, v4 +; GFX942-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc +; GFX942-NEXT: v_lshlrev_b32_sdwa v3, v0, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1 +; GFX942-NEXT: v_and_or_b32 v3, v4, v2, v3 +; GFX942-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3 ; GFX942-NEXT: s_waitcnt lgkmcnt(0) -; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3 +; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4 ; GFX942-NEXT: s_or_b64 s[0:1], vcc, s[0:1] -; GFX942-NEXT: v_mov_b32_e32 v3, v4 ; GFX942-NEXT: s_andn2_b64 exec, exec, s[0:1] ; GFX942-NEXT: s_cbranch_execnz .LBB16_1 ; GFX942-NEXT: ; %bb.2: ; %atomicrmw.end @@ -4433,38 +4441,38 @@ define void @local_atomic_fadd_noret_bf16(ptr addrspace(3) %ptr) nounwind { ; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, -4, v0 ; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v0, 3, v0 ; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0 -; GFX11-TRUE16-NEXT: ds_load_b32 v2, v1 -; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff +; GFX11-TRUE16-NEXT: ds_load_b32 v3, v1 +; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v2, v0, 0xffff ; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 24, v0 ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) -; GFX11-TRUE16-NEXT: v_not_b32_e32 v3, v3 +; GFX11-TRUE16-NEXT: v_not_b32_e32 v2, v2 ; GFX11-TRUE16-NEXT: .p2align 6 ; GFX11-TRUE16-NEXT: .LBB16_1: ; %atomicrmw.start ; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0) -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v4, v0, v2 -; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 16, v4 +; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v3 ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-TRUE16-NEXT: v_add_f32_e32 v4, 4.0, v4 -; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v4, 16, 1 -; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v4 -; GFX11-TRUE16-NEXT: 
v_cmp_u_f32_e32 vcc_lo, v4, v4 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v0, v4 +; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX11-TRUE16-NEXT: v_add_f32_e32 v3, 4.0, v3 +; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v3, 16, 1 +; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v3 +; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3 ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-TRUE16-NEXT: v_add3_u32 v5, v5, v4, 0x7fff -; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo +; GFX11-TRUE16-NEXT: v_add3_u32 v5, v5, v3, 0x7fff +; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, 0 ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v4.h -; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v4, v0, v5 +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v3.h +; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v0, v5 ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX11-TRUE16-NEXT: v_and_or_b32 v4, v2, v3, v4 +; GFX11-TRUE16-NEXT: v_and_or_b32 v3, v4, v2, v3 ; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0 -; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v1, v4, v2 +; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v1, v3, v4 ; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0) ; GFX11-TRUE16-NEXT: buffer_gl0_inv -; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2 -; GFX11-TRUE16-NEXT: v_mov_b32_e32 v2, v4 +; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4 ; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0 ; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0 @@ -4479,37 +4487,37 @@ define void @local_atomic_fadd_noret_bf16(ptr addrspace(3) %ptr) nounwind { ; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, -4, v0 ; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v0, 3, v0 ; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0 -; GFX11-FAKE16-NEXT: ds_load_b32 v2, v1 -; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff +; GFX11-FAKE16-NEXT: ds_load_b32 v3, v1 +; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v2, v0, 0xffff ; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 24, v0 ; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) -; GFX11-FAKE16-NEXT: v_not_b32_e32 v3, v3 +; GFX11-FAKE16-NEXT: v_not_b32_e32 v2, v2 ; GFX11-FAKE16-NEXT: .p2align 6 ; GFX11-FAKE16-NEXT: .LBB16_1: ; %atomicrmw.start ; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0) -; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, v0, v2 -; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v4 +; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v3 +; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v0, v4 +; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3 ; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-FAKE16-NEXT: v_add_f32_e32 v4, 4.0, v4 -; GFX11-FAKE16-NEXT: v_bfe_u32 v5, v4, 16, 1 -; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v4 -; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4 +; GFX11-FAKE16-NEXT: v_add_f32_e32 v3, 4.0, v3 +; GFX11-FAKE16-NEXT: v_bfe_u32 v5, v3, 16, 1 +; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v3 +; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3 ; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | 
instid1(VALU_DEP_1) -; GFX11-FAKE16-NEXT: v_add3_u32 v5, v5, v4, 0x7fff -; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo +; GFX11-FAKE16-NEXT: v_add3_u32 v5, v5, v3, 0x7fff +; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo ; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v4 -; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, v0, v4 +; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3 +; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v0, v3 ; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX11-FAKE16-NEXT: v_and_or_b32 v4, v2, v3, v4 +; GFX11-FAKE16-NEXT: v_and_or_b32 v3, v4, v2, v3 ; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0 -; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v1, v4, v2 +; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v3, v1, v3, v4 ; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0) ; GFX11-FAKE16-NEXT: buffer_gl0_inv -; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2 -; GFX11-FAKE16-NEXT: v_mov_b32_e32 v2, v4 +; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4 ; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0 ; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0 @@ -4524,28 +4532,28 @@ define void @local_atomic_fadd_noret_bf16(ptr addrspace(3) %ptr) nounwind { ; GFX10-NEXT: v_and_b32_e32 v1, -4, v0 ; GFX10-NEXT: v_lshlrev_b32_e32 v0, 3, v0 ; GFX10-NEXT: s_mov_b32 s4, 0 -; GFX10-NEXT: ds_read_b32 v2, v1 -; GFX10-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff +; GFX10-NEXT: ds_read_b32 v3, v1 +; GFX10-NEXT: v_lshlrev_b32_e64 v2, v0, 0xffff ; GFX10-NEXT: v_and_b32_e32 v0, 24, v0 -; GFX10-NEXT: v_not_b32_e32 v3, v3 +; GFX10-NEXT: v_not_b32_e32 v2, v2 ; GFX10-NEXT: .LBB16_1: ; %atomicrmw.start ; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX10-NEXT: s_waitcnt lgkmcnt(0) -; GFX10-NEXT: v_lshrrev_b32_sdwa v4, v0, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD -; GFX10-NEXT: v_add_f32_e32 v4, 4.0, v4 -; GFX10-NEXT: v_bfe_u32 v5, v4, 16, 1 -; GFX10-NEXT: v_or_b32_e32 v6, 0x400000, v4 -; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4 -; GFX10-NEXT: v_add3_u32 v5, v5, v4, 0x7fff -; GFX10-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo -; GFX10-NEXT: v_lshlrev_b32_sdwa v4, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1 -; GFX10-NEXT: v_and_or_b32 v4, v2, v3, v4 +; GFX10-NEXT: v_mov_b32_e32 v4, v3 +; GFX10-NEXT: v_lshrrev_b32_sdwa v3, v0, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD +; GFX10-NEXT: v_add_f32_e32 v3, 4.0, v3 +; GFX10-NEXT: v_bfe_u32 v5, v3, 16, 1 +; GFX10-NEXT: v_or_b32_e32 v6, 0x400000, v3 +; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3 +; GFX10-NEXT: v_add3_u32 v5, v5, v3, 0x7fff +; GFX10-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo +; GFX10-NEXT: v_lshlrev_b32_sdwa v3, v0, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1 +; GFX10-NEXT: v_and_or_b32 v3, v4, v2, v3 ; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 -; GFX10-NEXT: ds_cmpst_rtn_b32 v4, v1, v2, v4 +; GFX10-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3 ; GFX10-NEXT: s_waitcnt lgkmcnt(0) ; GFX10-NEXT: buffer_gl0_inv -; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2 -; GFX10-NEXT: v_mov_b32_e32 v2, v4 +; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4 ; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4 ; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4 ; GFX10-NEXT: s_cbranch_execnz .LBB16_1 @@ -4568,20 +4576,20 @@ define void @local_atomic_fadd_noret_bf16(ptr addrspace(3) %ptr) nounwind { ; GFX90A-NEXT: .LBB16_1: ; %atomicrmw.start 
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX90A-NEXT: s_waitcnt lgkmcnt(0) -; GFX90A-NEXT: v_lshrrev_b32_sdwa v4, v0, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD -; GFX90A-NEXT: v_add_f32_e32 v4, 4.0, v4 -; GFX90A-NEXT: v_bfe_u32 v5, v4, 16, 1 -; GFX90A-NEXT: v_or_b32_e32 v6, 0x400000, v4 -; GFX90A-NEXT: v_add3_u32 v5, v5, v4, s6 -; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v4, v4 -; GFX90A-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc -; GFX90A-NEXT: v_lshlrev_b32_sdwa v4, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1 -; GFX90A-NEXT: v_and_or_b32 v4, v3, v2, v4 -; GFX90A-NEXT: ds_cmpst_rtn_b32 v4, v1, v3, v4 +; GFX90A-NEXT: v_mov_b32_e32 v4, v3 +; GFX90A-NEXT: v_lshrrev_b32_sdwa v3, v0, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD +; GFX90A-NEXT: v_add_f32_e32 v3, 4.0, v3 +; GFX90A-NEXT: v_bfe_u32 v5, v3, 16, 1 +; GFX90A-NEXT: v_or_b32_e32 v6, 0x400000, v3 +; GFX90A-NEXT: v_add3_u32 v5, v5, v3, s6 +; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v3, v3 +; GFX90A-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc +; GFX90A-NEXT: v_lshlrev_b32_sdwa v3, v0, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1 +; GFX90A-NEXT: v_and_or_b32 v3, v4, v2, v3 +; GFX90A-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3 ; GFX90A-NEXT: s_waitcnt lgkmcnt(0) -; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3 +; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4 ; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5] -; GFX90A-NEXT: v_mov_b32_e32 v3, v4 ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX90A-NEXT: s_cbranch_execnz .LBB16_1 ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end @@ -4603,20 +4611,20 @@ define void @local_atomic_fadd_noret_bf16(ptr addrspace(3) %ptr) nounwind { ; GFX908-NEXT: .LBB16_1: ; %atomicrmw.start ; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX908-NEXT: s_waitcnt lgkmcnt(0) -; GFX908-NEXT: v_lshrrev_b32_sdwa v4, v0, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD -; GFX908-NEXT: v_add_f32_e32 v4, 4.0, v4 -; GFX908-NEXT: v_bfe_u32 v5, v4, 16, 1 -; GFX908-NEXT: v_or_b32_e32 v6, 0x400000, v4 -; GFX908-NEXT: v_add3_u32 v5, v5, v4, s6 -; GFX908-NEXT: v_cmp_u_f32_e32 vcc, v4, v4 -; GFX908-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc -; GFX908-NEXT: v_lshlrev_b32_sdwa v4, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1 -; GFX908-NEXT: v_and_or_b32 v4, v3, v2, v4 -; GFX908-NEXT: ds_cmpst_rtn_b32 v4, v1, v3, v4 +; GFX908-NEXT: v_mov_b32_e32 v4, v3 +; GFX908-NEXT: v_lshrrev_b32_sdwa v3, v0, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD +; GFX908-NEXT: v_add_f32_e32 v3, 4.0, v3 +; GFX908-NEXT: v_bfe_u32 v5, v3, 16, 1 +; GFX908-NEXT: v_or_b32_e32 v6, 0x400000, v3 +; GFX908-NEXT: v_add3_u32 v5, v5, v3, s6 +; GFX908-NEXT: v_cmp_u_f32_e32 vcc, v3, v3 +; GFX908-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc +; GFX908-NEXT: v_lshlrev_b32_sdwa v3, v0, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1 +; GFX908-NEXT: v_and_or_b32 v3, v4, v2, v3 +; GFX908-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3 ; GFX908-NEXT: s_waitcnt lgkmcnt(0) -; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3 +; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4 ; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5] -; GFX908-NEXT: v_mov_b32_e32 v3, v4 ; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX908-NEXT: s_cbranch_execnz .LBB16_1 ; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end @@ -4638,22 +4646,22 @@ define void @local_atomic_fadd_noret_bf16(ptr addrspace(3) %ptr) nounwind { ; GFX8-NEXT: .LBB16_1: ; %atomicrmw.start ; GFX8-NEXT: ; =>This 
Inner Loop Header: Depth=1 ; GFX8-NEXT: s_waitcnt lgkmcnt(0) -; GFX8-NEXT: v_lshrrev_b32_sdwa v4, v0, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD -; GFX8-NEXT: v_add_f32_e32 v4, 4.0, v4 -; GFX8-NEXT: v_bfe_u32 v6, v4, 16, 1 -; GFX8-NEXT: v_add_u32_e32 v6, vcc, v6, v4 +; GFX8-NEXT: v_mov_b32_e32 v4, v3 +; GFX8-NEXT: v_lshrrev_b32_sdwa v3, v0, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD +; GFX8-NEXT: v_add_f32_e32 v3, 4.0, v3 +; GFX8-NEXT: v_bfe_u32 v6, v3, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v6, vcc, v6, v3 ; GFX8-NEXT: v_add_u32_e32 v6, vcc, 0x7fff, v6 -; GFX8-NEXT: v_or_b32_e32 v7, 0x400000, v4 -; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v4, v4 -; GFX8-NEXT: v_cndmask_b32_e32 v4, v6, v7, vcc -; GFX8-NEXT: v_and_b32_e32 v5, v3, v2 -; GFX8-NEXT: v_lshlrev_b32_sdwa v4, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1 -; GFX8-NEXT: v_or_b32_e32 v4, v5, v4 -; GFX8-NEXT: ds_cmpst_rtn_b32 v4, v1, v3, v4 +; GFX8-NEXT: v_or_b32_e32 v7, 0x400000, v3 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v3, v3 +; GFX8-NEXT: v_cndmask_b32_e32 v3, v6, v7, vcc +; GFX8-NEXT: v_and_b32_e32 v5, v4, v2 +; GFX8-NEXT: v_lshlrev_b32_sdwa v3, v0, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1 +; GFX8-NEXT: v_or_b32_e32 v3, v5, v3 +; GFX8-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3 ; GFX8-NEXT: s_waitcnt lgkmcnt(0) -; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3 +; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4 ; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5] -; GFX8-NEXT: v_mov_b32_e32 v3, v4 ; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX8-NEXT: s_cbranch_execnz .LBB16_1 ; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end @@ -4674,18 +4682,18 @@ define void @local_atomic_fadd_noret_bf16(ptr addrspace(3) %ptr) nounwind { ; GFX7-NEXT: .LBB16_1: ; %atomicrmw.start ; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX7-NEXT: s_waitcnt lgkmcnt(0) -; GFX7-NEXT: v_lshrrev_b32_e32 v4, v0, v3 -; GFX7-NEXT: v_lshlrev_b32_e32 v4, 16, v4 -; GFX7-NEXT: v_add_f32_e32 v4, 4.0, v4 -; GFX7-NEXT: v_lshrrev_b32_e32 v4, 16, v4 -; GFX7-NEXT: v_and_b32_e32 v5, v3, v2 -; GFX7-NEXT: v_lshlrev_b32_e32 v4, v0, v4 -; GFX7-NEXT: v_or_b32_e32 v4, v5, v4 -; GFX7-NEXT: ds_cmpst_rtn_b32 v4, v1, v3, v4 +; GFX7-NEXT: v_mov_b32_e32 v4, v3 +; GFX7-NEXT: v_lshrrev_b32_e32 v3, v0, v4 +; GFX7-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; GFX7-NEXT: v_add_f32_e32 v3, 4.0, v3 +; GFX7-NEXT: v_lshrrev_b32_e32 v3, 16, v3 +; GFX7-NEXT: v_and_b32_e32 v5, v4, v2 +; GFX7-NEXT: v_lshlrev_b32_e32 v3, v0, v3 +; GFX7-NEXT: v_or_b32_e32 v3, v5, v3 +; GFX7-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3 ; GFX7-NEXT: s_waitcnt lgkmcnt(0) -; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3 +; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4 ; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5] -; GFX7-NEXT: v_mov_b32_e32 v3, v4 ; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX7-NEXT: s_cbranch_execnz .LBB16_1 ; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end @@ -4706,18 +4714,18 @@ define void @local_atomic_fadd_noret_bf16(ptr addrspace(3) %ptr) nounwind { ; GFX6-NEXT: .LBB16_1: ; %atomicrmw.start ; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX6-NEXT: s_waitcnt lgkmcnt(0) -; GFX6-NEXT: v_lshrrev_b32_e32 v4, v0, v3 -; GFX6-NEXT: v_lshlrev_b32_e32 v4, 16, v4 -; GFX6-NEXT: v_add_f32_e32 v4, 4.0, v4 -; GFX6-NEXT: v_lshrrev_b32_e32 v4, 16, v4 -; GFX6-NEXT: v_and_b32_e32 v5, v3, v2 -; GFX6-NEXT: v_lshlrev_b32_e32 v4, v0, v4 -; GFX6-NEXT: v_or_b32_e32 v4, v5, v4 -; GFX6-NEXT: ds_cmpst_rtn_b32 v4, v1, v3, v4 +; GFX6-NEXT: v_mov_b32_e32 v4, v3 +; GFX6-NEXT: v_lshrrev_b32_e32 v3, v0, v4 +; 
GFX6-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; GFX6-NEXT: v_add_f32_e32 v3, 4.0, v3 +; GFX6-NEXT: v_lshrrev_b32_e32 v3, 16, v3 +; GFX6-NEXT: v_and_b32_e32 v5, v4, v2 +; GFX6-NEXT: v_lshlrev_b32_e32 v3, v0, v3 +; GFX6-NEXT: v_or_b32_e32 v3, v5, v3 +; GFX6-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3 ; GFX6-NEXT: s_waitcnt lgkmcnt(0) -; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3 +; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4 ; GFX6-NEXT: s_or_b64 s[4:5], vcc, s[4:5] -; GFX6-NEXT: v_mov_b32_e32 v3, v4 ; GFX6-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX6-NEXT: s_cbranch_execnz .LBB16_1 ; GFX6-NEXT: ; %bb.2: ; %atomicrmw.end @@ -4748,29 +4756,30 @@ define void @local_atomic_fadd_noret_bf16__offset(ptr addrspace(3) %ptr) nounwin ; GFX12-TRUE16-NEXT: .LBB17_1: ; %atomicrmw.start ; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0 -; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v4, v1, v3 +; GFX12-TRUE16-NEXT: v_mov_b32_e32 v4, v3 ; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 16, v4 -; GFX12-TRUE16-NEXT: v_add_f32_e32 v4, 4.0, v4 -; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3) -; GFX12-TRUE16-NEXT: v_bfe_u32 v5, v4, 16, 1 -; GFX12-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v4 -; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4 -; GFX12-TRUE16-NEXT: v_add3_u32 v5, v5, v4, 0x7fff +; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v1, v4 +; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX12-TRUE16-NEXT: v_add_f32_e32 v3, 4.0, v3 +; GFX12-TRUE16-NEXT: v_bfe_u32 v5, v3, 16, 1 +; GFX12-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v3 +; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3 +; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1) +; GFX12-TRUE16-NEXT: v_add3_u32 v5, v5, v3, 0x7fff ; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd -; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2) -; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo +; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo ; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.h, 0 -; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.l, v4.h -; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v4, v1, v5 -; GFX12-TRUE16-NEXT: v_and_or_b32 v4, v3, v2, v4 +; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.l, v3.h +; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v1, v5 +; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX12-TRUE16-NEXT: v_and_or_b32 v3, v4, v2, v3 ; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0 -; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3 +; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4 ; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0 ; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SE -; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3 -; GFX12-TRUE16-NEXT: v_mov_b32_e32 v3, v4 +; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4 ; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe ; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0 ; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe @@ -4801,28 +4810,29 @@ define void @local_atomic_fadd_noret_bf16__offset(ptr addrspace(3) %ptr) nounwin ; GFX12-FAKE16-NEXT: .LBB17_1: ; %atomicrmw.start ; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1 ; 
GFX12-FAKE16-NEXT: s_wait_dscnt 0x0 -; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v4, v1, v3 +; GFX12-FAKE16-NEXT: v_mov_b32_e32 v4, v3 ; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v4 -; GFX12-FAKE16-NEXT: v_add_f32_e32 v4, 4.0, v4 -; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3) -; GFX12-FAKE16-NEXT: v_bfe_u32 v5, v4, 16, 1 -; GFX12-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v4 -; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4 -; GFX12-FAKE16-NEXT: v_add3_u32 v5, v5, v4, 0x7fff -; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd +; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v1, v4 +; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3 ; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo -; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v4 +; GFX12-FAKE16-NEXT: v_add_f32_e32 v3, 4.0, v3 +; GFX12-FAKE16-NEXT: v_bfe_u32 v5, v3, 16, 1 +; GFX12-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v3 +; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3 +; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1) +; GFX12-FAKE16-NEXT: v_add3_u32 v5, v5, v3, 0x7fff +; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd +; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo ; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, v1, v4 -; GFX12-FAKE16-NEXT: v_and_or_b32 v4, v3, v2, v4 +; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3 +; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v1, v3 +; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX12-FAKE16-NEXT: v_and_or_b32 v3, v4, v2, v3 ; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0 -; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3 +; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4 ; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0 ; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SE -; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3 -; GFX12-FAKE16-NEXT: v_mov_b32_e32 v3, v4 +; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4 ; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe ; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0 ; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe @@ -4849,22 +4859,22 @@ define void @local_atomic_fadd_noret_bf16__offset(ptr addrspace(3) %ptr) nounwin ; GFX942-NEXT: .LBB17_1: ; %atomicrmw.start ; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX942-NEXT: s_waitcnt lgkmcnt(0) -; GFX942-NEXT: v_lshrrev_b32_sdwa v4, v1, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD +; GFX942-NEXT: v_mov_b32_e32 v4, v3 +; GFX942-NEXT: v_lshrrev_b32_sdwa v3, v1, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; GFX942-NEXT: s_nop 0 -; GFX942-NEXT: v_add_f32_e32 v4, 4.0, v4 -; GFX942-NEXT: v_bfe_u32 v5, v4, 16, 1 -; GFX942-NEXT: v_or_b32_e32 v6, 0x400000, v4 -; GFX942-NEXT: v_add3_u32 v5, v5, v4, s2 -; GFX942-NEXT: v_cmp_u_f32_e32 vcc, v4, v4 +; GFX942-NEXT: v_add_f32_e32 v3, 4.0, v3 +; GFX942-NEXT: v_bfe_u32 v5, v3, 16, 1 +; GFX942-NEXT: v_or_b32_e32 v6, 0x400000, v3 +; GFX942-NEXT: v_add3_u32 v5, v5, v3, s2 +; GFX942-NEXT: v_cmp_u_f32_e32 vcc, v3, v3 ; GFX942-NEXT: s_nop 1 -; GFX942-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc -; GFX942-NEXT: v_lshlrev_b32_sdwa v4, v1, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1 -; GFX942-NEXT: v_and_or_b32 v4, v3, v2, v4 -; GFX942-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4 +; 
GFX942-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc +; GFX942-NEXT: v_lshlrev_b32_sdwa v3, v1, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1 +; GFX942-NEXT: v_and_or_b32 v3, v4, v2, v3 +; GFX942-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3 ; GFX942-NEXT: s_waitcnt lgkmcnt(0) -; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3 +; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4 ; GFX942-NEXT: s_or_b64 s[0:1], vcc, s[0:1] -; GFX942-NEXT: v_mov_b32_e32 v3, v4 ; GFX942-NEXT: s_andn2_b64 exec, exec, s[0:1] ; GFX942-NEXT: s_cbranch_execnz .LBB17_1 ; GFX942-NEXT: ; %bb.2: ; %atomicrmw.end @@ -4888,28 +4898,29 @@ define void @local_atomic_fadd_noret_bf16__offset(ptr addrspace(3) %ptr) nounwin ; GFX11-TRUE16-NEXT: .LBB17_1: ; %atomicrmw.start ; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0) -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v4, v1, v3 +; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v3 ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 16, v4 -; GFX11-TRUE16-NEXT: v_add_f32_e32 v4, 4.0, v4 -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3) -; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v4, 16, 1 -; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v4 -; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4 -; GFX11-TRUE16-NEXT: v_add3_u32 v5, v5, v4, 0x7fff -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2) -; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, 0 -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v4.h +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v1, v4 +; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3 ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v4, v1, v5 -; GFX11-TRUE16-NEXT: v_and_or_b32 v4, v3, v2, v4 +; GFX11-TRUE16-NEXT: v_add_f32_e32 v3, 4.0, v3 +; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v3, 16, 1 +; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v3 +; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3 +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX11-TRUE16-NEXT: v_add3_u32 v5, v5, v3, 0x7fff +; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, 0 +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v3.h +; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v1, v5 +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-TRUE16-NEXT: v_and_or_b32 v3, v4, v2, v3 ; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0 -; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3 +; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4 ; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0) ; GFX11-TRUE16-NEXT: buffer_gl0_inv -; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3 -; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v4 +; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4 ; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0 ; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0 @@ -4935,27 +4946,28 @@ define void @local_atomic_fadd_noret_bf16__offset(ptr addrspace(3) %ptr) nounwin ; GFX11-FAKE16-NEXT: .LBB17_1: ; %atomicrmw.start ; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0) -; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, v1, 
v3 +; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v3 ; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v4 -; GFX11-FAKE16-NEXT: v_add_f32_e32 v4, 4.0, v4 -; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3) -; GFX11-FAKE16-NEXT: v_bfe_u32 v5, v4, 16, 1 -; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v4 -; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4 -; GFX11-FAKE16-NEXT: v_add3_u32 v5, v5, v4, 0x7fff +; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v1, v4 +; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3 ; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo -; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v4 +; GFX11-FAKE16-NEXT: v_add_f32_e32 v3, 4.0, v3 +; GFX11-FAKE16-NEXT: v_bfe_u32 v5, v3, 16, 1 +; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v3 +; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3 +; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX11-FAKE16-NEXT: v_add3_u32 v5, v5, v3, 0x7fff +; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo ; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, v1, v4 -; GFX11-FAKE16-NEXT: v_and_or_b32 v4, v3, v2, v4 +; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3 +; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v1, v3 +; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-FAKE16-NEXT: v_and_or_b32 v3, v4, v2, v3 ; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0 -; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3 +; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4 ; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0) ; GFX11-FAKE16-NEXT: buffer_gl0_inv -; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3 -; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v4 +; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4 ; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0 ; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0 @@ -4978,21 +4990,21 @@ define void @local_atomic_fadd_noret_bf16__offset(ptr addrspace(3) %ptr) nounwin ; GFX10-NEXT: .LBB17_1: ; %atomicrmw.start ; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX10-NEXT: s_waitcnt lgkmcnt(0) -; GFX10-NEXT: v_lshrrev_b32_sdwa v4, v1, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD -; GFX10-NEXT: v_add_f32_e32 v4, 4.0, v4 -; GFX10-NEXT: v_bfe_u32 v5, v4, 16, 1 -; GFX10-NEXT: v_or_b32_e32 v6, 0x400000, v4 -; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4 -; GFX10-NEXT: v_add3_u32 v5, v5, v4, 0x7fff -; GFX10-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo -; GFX10-NEXT: v_lshlrev_b32_sdwa v4, v1, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1 -; GFX10-NEXT: v_and_or_b32 v4, v3, v2, v4 +; GFX10-NEXT: v_mov_b32_e32 v4, v3 +; GFX10-NEXT: v_lshrrev_b32_sdwa v3, v1, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD +; GFX10-NEXT: v_add_f32_e32 v3, 4.0, v3 +; GFX10-NEXT: v_bfe_u32 v5, v3, 16, 1 +; GFX10-NEXT: v_or_b32_e32 v6, 0x400000, v3 +; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3 +; GFX10-NEXT: v_add3_u32 v5, v5, v3, 0x7fff +; GFX10-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo +; GFX10-NEXT: v_lshlrev_b32_sdwa v3, v1, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1 +; GFX10-NEXT: v_and_or_b32 v3, v4, v2, v3 ; GFX10-NEXT: 
s_waitcnt_vscnt null, 0x0 -; GFX10-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4 +; GFX10-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3 ; GFX10-NEXT: s_waitcnt lgkmcnt(0) ; GFX10-NEXT: buffer_gl0_inv -; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3 -; GFX10-NEXT: v_mov_b32_e32 v3, v4 +; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4 ; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4 ; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4 ; GFX10-NEXT: s_cbranch_execnz .LBB17_1 @@ -5016,20 +5028,20 @@ define void @local_atomic_fadd_noret_bf16__offset(ptr addrspace(3) %ptr) nounwin ; GFX90A-NEXT: .LBB17_1: ; %atomicrmw.start ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX90A-NEXT: s_waitcnt lgkmcnt(0) -; GFX90A-NEXT: v_lshrrev_b32_sdwa v4, v1, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD -; GFX90A-NEXT: v_add_f32_e32 v4, 4.0, v4 -; GFX90A-NEXT: v_bfe_u32 v5, v4, 16, 1 -; GFX90A-NEXT: v_or_b32_e32 v6, 0x400000, v4 -; GFX90A-NEXT: v_add3_u32 v5, v5, v4, s6 -; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v4, v4 -; GFX90A-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc -; GFX90A-NEXT: v_lshlrev_b32_sdwa v4, v1, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1 -; GFX90A-NEXT: v_and_or_b32 v4, v3, v2, v4 -; GFX90A-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4 +; GFX90A-NEXT: v_mov_b32_e32 v4, v3 +; GFX90A-NEXT: v_lshrrev_b32_sdwa v3, v1, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD +; GFX90A-NEXT: v_add_f32_e32 v3, 4.0, v3 +; GFX90A-NEXT: v_bfe_u32 v5, v3, 16, 1 +; GFX90A-NEXT: v_or_b32_e32 v6, 0x400000, v3 +; GFX90A-NEXT: v_add3_u32 v5, v5, v3, s6 +; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v3, v3 +; GFX90A-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc +; GFX90A-NEXT: v_lshlrev_b32_sdwa v3, v1, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1 +; GFX90A-NEXT: v_and_or_b32 v3, v4, v2, v3 +; GFX90A-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3 ; GFX90A-NEXT: s_waitcnt lgkmcnt(0) -; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3 +; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4 ; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5] -; GFX90A-NEXT: v_mov_b32_e32 v3, v4 ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX90A-NEXT: s_cbranch_execnz .LBB17_1 ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end @@ -5052,20 +5064,20 @@ define void @local_atomic_fadd_noret_bf16__offset(ptr addrspace(3) %ptr) nounwin ; GFX908-NEXT: .LBB17_1: ; %atomicrmw.start ; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX908-NEXT: s_waitcnt lgkmcnt(0) -; GFX908-NEXT: v_lshrrev_b32_sdwa v4, v1, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD -; GFX908-NEXT: v_add_f32_e32 v4, 4.0, v4 -; GFX908-NEXT: v_bfe_u32 v5, v4, 16, 1 -; GFX908-NEXT: v_or_b32_e32 v6, 0x400000, v4 -; GFX908-NEXT: v_add3_u32 v5, v5, v4, s6 -; GFX908-NEXT: v_cmp_u_f32_e32 vcc, v4, v4 -; GFX908-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc -; GFX908-NEXT: v_lshlrev_b32_sdwa v4, v1, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1 -; GFX908-NEXT: v_and_or_b32 v4, v3, v2, v4 -; GFX908-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4 +; GFX908-NEXT: v_mov_b32_e32 v4, v3 +; GFX908-NEXT: v_lshrrev_b32_sdwa v3, v1, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD +; GFX908-NEXT: v_add_f32_e32 v3, 4.0, v3 +; GFX908-NEXT: v_bfe_u32 v5, v3, 16, 1 +; GFX908-NEXT: v_or_b32_e32 v6, 0x400000, v3 +; GFX908-NEXT: v_add3_u32 v5, v5, v3, s6 +; GFX908-NEXT: v_cmp_u_f32_e32 vcc, v3, v3 +; GFX908-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc +; GFX908-NEXT: v_lshlrev_b32_sdwa v3, v1, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD 
src1_sel:WORD_1 +; GFX908-NEXT: v_and_or_b32 v3, v4, v2, v3 +; GFX908-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3 ; GFX908-NEXT: s_waitcnt lgkmcnt(0) -; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3 +; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4 ; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5] -; GFX908-NEXT: v_mov_b32_e32 v3, v4 ; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX908-NEXT: s_cbranch_execnz .LBB17_1 ; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end @@ -5088,22 +5100,22 @@ define void @local_atomic_fadd_noret_bf16__offset(ptr addrspace(3) %ptr) nounwin ; GFX8-NEXT: .LBB17_1: ; %atomicrmw.start ; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX8-NEXT: s_waitcnt lgkmcnt(0) -; GFX8-NEXT: v_lshrrev_b32_sdwa v4, v1, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD -; GFX8-NEXT: v_add_f32_e32 v4, 4.0, v4 -; GFX8-NEXT: v_bfe_u32 v6, v4, 16, 1 -; GFX8-NEXT: v_add_u32_e32 v6, vcc, v6, v4 +; GFX8-NEXT: v_mov_b32_e32 v4, v3 +; GFX8-NEXT: v_lshrrev_b32_sdwa v3, v1, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD +; GFX8-NEXT: v_add_f32_e32 v3, 4.0, v3 +; GFX8-NEXT: v_bfe_u32 v6, v3, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v6, vcc, v6, v3 ; GFX8-NEXT: v_add_u32_e32 v6, vcc, 0x7fff, v6 -; GFX8-NEXT: v_or_b32_e32 v7, 0x400000, v4 -; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v4, v4 -; GFX8-NEXT: v_cndmask_b32_e32 v4, v6, v7, vcc -; GFX8-NEXT: v_and_b32_e32 v5, v3, v2 -; GFX8-NEXT: v_lshlrev_b32_sdwa v4, v1, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1 -; GFX8-NEXT: v_or_b32_e32 v4, v5, v4 -; GFX8-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4 +; GFX8-NEXT: v_or_b32_e32 v7, 0x400000, v3 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v3, v3 +; GFX8-NEXT: v_cndmask_b32_e32 v3, v6, v7, vcc +; GFX8-NEXT: v_and_b32_e32 v5, v4, v2 +; GFX8-NEXT: v_lshlrev_b32_sdwa v3, v1, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1 +; GFX8-NEXT: v_or_b32_e32 v3, v5, v3 +; GFX8-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3 ; GFX8-NEXT: s_waitcnt lgkmcnt(0) -; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3 +; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4 ; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5] -; GFX8-NEXT: v_mov_b32_e32 v3, v4 ; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX8-NEXT: s_cbranch_execnz .LBB17_1 ; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end @@ -5125,18 +5137,18 @@ define void @local_atomic_fadd_noret_bf16__offset(ptr addrspace(3) %ptr) nounwin ; GFX7-NEXT: .LBB17_1: ; %atomicrmw.start ; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX7-NEXT: s_waitcnt lgkmcnt(0) -; GFX7-NEXT: v_lshrrev_b32_e32 v4, v1, v3 -; GFX7-NEXT: v_lshlrev_b32_e32 v4, 16, v4 -; GFX7-NEXT: v_add_f32_e32 v4, 4.0, v4 -; GFX7-NEXT: v_lshrrev_b32_e32 v4, 16, v4 -; GFX7-NEXT: v_and_b32_e32 v5, v3, v2 -; GFX7-NEXT: v_lshlrev_b32_e32 v4, v1, v4 -; GFX7-NEXT: v_or_b32_e32 v4, v5, v4 -; GFX7-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4 +; GFX7-NEXT: v_mov_b32_e32 v4, v3 +; GFX7-NEXT: v_lshrrev_b32_e32 v3, v1, v4 +; GFX7-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; GFX7-NEXT: v_add_f32_e32 v3, 4.0, v3 +; GFX7-NEXT: v_lshrrev_b32_e32 v3, 16, v3 +; GFX7-NEXT: v_and_b32_e32 v5, v4, v2 +; GFX7-NEXT: v_lshlrev_b32_e32 v3, v1, v3 +; GFX7-NEXT: v_or_b32_e32 v3, v5, v3 +; GFX7-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3 ; GFX7-NEXT: s_waitcnt lgkmcnt(0) -; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3 +; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4 ; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5] -; GFX7-NEXT: v_mov_b32_e32 v3, v4 ; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX7-NEXT: s_cbranch_execnz .LBB17_1 ; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end @@ -5158,18 +5170,18 @@ 
define void @local_atomic_fadd_noret_bf16__offset(ptr addrspace(3) %ptr) nounwin ; GFX6-NEXT: .LBB17_1: ; %atomicrmw.start ; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX6-NEXT: s_waitcnt lgkmcnt(0) -; GFX6-NEXT: v_lshrrev_b32_e32 v4, v1, v3 -; GFX6-NEXT: v_lshlrev_b32_e32 v4, 16, v4 -; GFX6-NEXT: v_add_f32_e32 v4, 4.0, v4 -; GFX6-NEXT: v_lshrrev_b32_e32 v4, 16, v4 -; GFX6-NEXT: v_and_b32_e32 v5, v3, v2 -; GFX6-NEXT: v_lshlrev_b32_e32 v4, v1, v4 -; GFX6-NEXT: v_or_b32_e32 v4, v5, v4 -; GFX6-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4 +; GFX6-NEXT: v_mov_b32_e32 v4, v3 +; GFX6-NEXT: v_lshrrev_b32_e32 v3, v1, v4 +; GFX6-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; GFX6-NEXT: v_add_f32_e32 v3, 4.0, v3 +; GFX6-NEXT: v_lshrrev_b32_e32 v3, 16, v3 +; GFX6-NEXT: v_and_b32_e32 v5, v4, v2 +; GFX6-NEXT: v_lshlrev_b32_e32 v3, v1, v3 +; GFX6-NEXT: v_or_b32_e32 v3, v5, v3 +; GFX6-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3 ; GFX6-NEXT: s_waitcnt lgkmcnt(0) -; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3 +; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4 ; GFX6-NEXT: s_or_b64 s[4:5], vcc, s[4:5] -; GFX6-NEXT: v_mov_b32_e32 v3, v4 ; GFX6-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX6-NEXT: s_cbranch_execnz .LBB17_1 ; GFX6-NEXT: ; %bb.2: ; %atomicrmw.end @@ -5569,26 +5581,27 @@ define void @local_atomic_fadd_noret_bf16__offset__align4(ptr addrspace(3) %ptr) ; GFX12-TRUE16-NEXT: .LBB19_1: ; %atomicrmw.start ; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0 -; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v1 +; GFX12-TRUE16-NEXT: v_mov_b32_e32 v2, v1 ; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX12-TRUE16-NEXT: v_add_f32_e32 v2, 4.0, v2 -; GFX12-TRUE16-NEXT: v_bfe_u32 v3, v2, 16, 1 -; GFX12-TRUE16-NEXT: v_or_b32_e32 v4, 0x400000, v2 -; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2 -; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1) -; GFX12-TRUE16-NEXT: v_add3_u32 v3, v3, v2, 0x7fff +; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 16, v2 +; GFX12-TRUE16-NEXT: v_add_f32_e32 v1, 4.0, v1 +; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3) +; GFX12-TRUE16-NEXT: v_bfe_u32 v3, v1, 16, 1 +; GFX12-TRUE16-NEXT: v_or_b32_e32 v4, 0x400000, v1 +; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1 +; GFX12-TRUE16-NEXT: v_add3_u32 v3, v3, v1, 0x7fff ; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd -; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v2, v3, v4, vcc_lo +; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2) +; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc_lo ; GFX12-TRUE16-NEXT: v_mov_b16_e32 v3.h, 0 -; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX12-TRUE16-NEXT: v_mov_b16_e32 v3.l, v2.h -; GFX12-TRUE16-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v3 +; GFX12-TRUE16-NEXT: v_mov_b16_e32 v3.l, v1.h +; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX12-TRUE16-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v3 ; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0 -; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v1 offset:65534 +; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:65534 ; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0 ; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SE -; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1 -; GFX12-TRUE16-NEXT: v_mov_b32_e32 v1, v2 +; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2 ; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe ; GFX12-TRUE16-NEXT: s_or_b32 s0, 
vcc_lo, s0 ; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe @@ -5611,25 +5624,26 @@ define void @local_atomic_fadd_noret_bf16__offset__align4(ptr addrspace(3) %ptr) ; GFX12-FAKE16-NEXT: .LBB19_1: ; %atomicrmw.start ; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0 -; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v1 +; GFX12-FAKE16-NEXT: v_mov_b32_e32 v2, v1 ; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX12-FAKE16-NEXT: v_add_f32_e32 v2, 4.0, v2 -; GFX12-FAKE16-NEXT: v_bfe_u32 v3, v2, 16, 1 -; GFX12-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v2 -; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2 -; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1) -; GFX12-FAKE16-NEXT: v_add3_u32 v3, v3, v2, 0x7fff +; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v1, 16, v2 +; GFX12-FAKE16-NEXT: v_add_f32_e32 v1, 4.0, v1 +; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3) +; GFX12-FAKE16-NEXT: v_bfe_u32 v3, v1, 16, 1 +; GFX12-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v1 +; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1 +; GFX12-FAKE16-NEXT: v_add3_u32 v3, v3, v1, 0x7fff ; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd -; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v2, v3, v4, vcc_lo ; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2 -; GFX12-FAKE16-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v2 +; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc_lo +; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1 +; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX12-FAKE16-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v1 ; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0 -; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v1 offset:65534 +; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:65534 ; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0 ; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SE -; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1 -; GFX12-FAKE16-NEXT: v_mov_b32_e32 v1, v2 +; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2 ; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe ; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0 ; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe @@ -5650,21 +5664,21 @@ define void @local_atomic_fadd_noret_bf16__offset__align4(ptr addrspace(3) %ptr) ; GFX942-NEXT: .LBB19_1: ; %atomicrmw.start ; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX942-NEXT: s_waitcnt lgkmcnt(0) -; GFX942-NEXT: v_lshlrev_b32_e32 v2, 16, v1 -; GFX942-NEXT: v_add_f32_e32 v2, 4.0, v2 -; GFX942-NEXT: v_bfe_u32 v3, v2, 16, 1 -; GFX942-NEXT: v_or_b32_e32 v4, 0x400000, v2 -; GFX942-NEXT: v_add3_u32 v3, v3, v2, s2 -; GFX942-NEXT: v_cmp_u_f32_e32 vcc, v2, v2 +; GFX942-NEXT: v_mov_b32_e32 v2, v1 +; GFX942-NEXT: v_lshlrev_b32_e32 v1, 16, v2 +; GFX942-NEXT: v_add_f32_e32 v1, 4.0, v1 +; GFX942-NEXT: v_bfe_u32 v3, v1, 16, 1 +; GFX942-NEXT: v_or_b32_e32 v4, 0x400000, v1 +; GFX942-NEXT: v_add3_u32 v3, v3, v1, s2 +; GFX942-NEXT: v_cmp_u_f32_e32 vcc, v1, v1 ; GFX942-NEXT: s_nop 1 -; GFX942-NEXT: v_cndmask_b32_e32 v2, v3, v4, vcc -; GFX942-NEXT: v_lshrrev_b32_e32 v2, 16, v2 -; GFX942-NEXT: v_and_or_b32 v2, v1, s3, v2 -; GFX942-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 offset:65534 +; GFX942-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc +; GFX942-NEXT: v_lshrrev_b32_e32 v1, 16, v1 +; GFX942-NEXT: v_and_or_b32 v1, v2, s3, v1 +; GFX942-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 offset:65534 ; GFX942-NEXT: s_waitcnt lgkmcnt(0) -; 
GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1 +; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2 ; GFX942-NEXT: s_or_b64 s[0:1], vcc, s[0:1] -; GFX942-NEXT: v_mov_b32_e32 v1, v2 ; GFX942-NEXT: s_andn2_b64 exec, exec, s[0:1] ; GFX942-NEXT: s_cbranch_execnz .LBB19_1 ; GFX942-NEXT: ; %bb.2: ; %atomicrmw.end @@ -5680,25 +5694,26 @@ define void @local_atomic_fadd_noret_bf16__offset__align4(ptr addrspace(3) %ptr) ; GFX11-TRUE16-NEXT: .LBB19_1: ; %atomicrmw.start ; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0) -; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v1 +; GFX11-TRUE16-NEXT: v_mov_b32_e32 v2, v1 ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-TRUE16-NEXT: v_add_f32_e32 v2, 4.0, v2 -; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v2, 16, 1 -; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, 0x400000, v2 -; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2 -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-TRUE16-NEXT: v_add3_u32 v3, v3, v2, 0x7fff -; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v3, v4, vcc_lo +; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 16, v2 +; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 4.0, v1 +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3) +; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v1, 16, 1 +; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, 0x400000, v1 +; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1 +; GFX11-TRUE16-NEXT: v_add3_u32 v3, v3, v1, 0x7fff +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2) +; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc_lo ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, 0 -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v2.h -; GFX11-TRUE16-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v3 +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v1.h +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-TRUE16-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v3 ; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0 -; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v1 offset:65534 +; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:65534 ; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0) ; GFX11-TRUE16-NEXT: buffer_gl0_inv -; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1 -; GFX11-TRUE16-NEXT: v_mov_b32_e32 v1, v2 +; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2 ; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0 ; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0 @@ -5716,24 +5731,25 @@ define void @local_atomic_fadd_noret_bf16__offset__align4(ptr addrspace(3) %ptr) ; GFX11-FAKE16-NEXT: .LBB19_1: ; %atomicrmw.start ; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0) -; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v1 +; GFX11-FAKE16-NEXT: v_mov_b32_e32 v2, v1 ; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-FAKE16-NEXT: v_add_f32_e32 v2, 4.0, v2 -; GFX11-FAKE16-NEXT: v_bfe_u32 v3, v2, 16, 1 -; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v2 -; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2 -; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-FAKE16-NEXT: v_add3_u32 v3, v3, v2, 0x7fff -; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v3, v4, vcc_lo +; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v1, 16, 
v2 +; GFX11-FAKE16-NEXT: v_add_f32_e32 v1, 4.0, v1 +; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3) +; GFX11-FAKE16-NEXT: v_bfe_u32 v3, v1, 16, 1 +; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v1 +; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1 +; GFX11-FAKE16-NEXT: v_add3_u32 v3, v3, v1, 0x7fff ; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2 -; GFX11-FAKE16-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v2 +; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc_lo +; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1 +; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-FAKE16-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v1 ; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0 -; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v1 offset:65534 +; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:65534 ; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0) ; GFX11-FAKE16-NEXT: buffer_gl0_inv -; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1 -; GFX11-FAKE16-NEXT: v_mov_b32_e32 v1, v2 +; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2 ; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0 ; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0 @@ -5750,21 +5766,21 @@ define void @local_atomic_fadd_noret_bf16__offset__align4(ptr addrspace(3) %ptr) ; GFX10-NEXT: .LBB19_1: ; %atomicrmw.start ; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX10-NEXT: s_waitcnt lgkmcnt(0) -; GFX10-NEXT: v_lshlrev_b32_e32 v2, 16, v1 -; GFX10-NEXT: v_add_f32_e32 v2, 4.0, v2 -; GFX10-NEXT: v_bfe_u32 v3, v2, 16, 1 -; GFX10-NEXT: v_or_b32_e32 v4, 0x400000, v2 -; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2 -; GFX10-NEXT: v_add3_u32 v3, v3, v2, 0x7fff -; GFX10-NEXT: v_cndmask_b32_e32 v2, v3, v4, vcc_lo -; GFX10-NEXT: v_lshrrev_b32_e32 v2, 16, v2 -; GFX10-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v2 +; GFX10-NEXT: v_mov_b32_e32 v2, v1 +; GFX10-NEXT: v_lshlrev_b32_e32 v1, 16, v2 +; GFX10-NEXT: v_add_f32_e32 v1, 4.0, v1 +; GFX10-NEXT: v_bfe_u32 v3, v1, 16, 1 +; GFX10-NEXT: v_or_b32_e32 v4, 0x400000, v1 +; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1 +; GFX10-NEXT: v_add3_u32 v3, v3, v1, 0x7fff +; GFX10-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc_lo +; GFX10-NEXT: v_lshrrev_b32_e32 v1, 16, v1 +; GFX10-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v1 ; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 -; GFX10-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 offset:65534 +; GFX10-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 offset:65534 ; GFX10-NEXT: s_waitcnt lgkmcnt(0) ; GFX10-NEXT: buffer_gl0_inv -; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1 -; GFX10-NEXT: v_mov_b32_e32 v1, v2 +; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2 ; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4 ; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4 ; GFX10-NEXT: s_cbranch_execnz .LBB19_1 @@ -5782,20 +5798,20 @@ define void @local_atomic_fadd_noret_bf16__offset__align4(ptr addrspace(3) %ptr) ; GFX90A-NEXT: .LBB19_1: ; %atomicrmw.start ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX90A-NEXT: s_waitcnt lgkmcnt(0) -; GFX90A-NEXT: v_lshlrev_b32_e32 v2, 16, v1 -; GFX90A-NEXT: v_add_f32_e32 v2, 4.0, v2 -; GFX90A-NEXT: v_bfe_u32 v3, v2, 16, 1 -; GFX90A-NEXT: v_or_b32_e32 v4, 0x400000, v2 -; GFX90A-NEXT: v_add3_u32 v3, v3, v2, s6 -; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v2, v2 -; GFX90A-NEXT: v_cndmask_b32_e32 v2, v3, v4, vcc -; GFX90A-NEXT: v_lshrrev_b32_e32 v2, 16, v2 -; GFX90A-NEXT: v_and_or_b32 v2, v1, s7, v2 -; 
GFX90A-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 offset:65534 +; GFX90A-NEXT: v_mov_b32_e32 v2, v1 +; GFX90A-NEXT: v_lshlrev_b32_e32 v1, 16, v2 +; GFX90A-NEXT: v_add_f32_e32 v1, 4.0, v1 +; GFX90A-NEXT: v_bfe_u32 v3, v1, 16, 1 +; GFX90A-NEXT: v_or_b32_e32 v4, 0x400000, v1 +; GFX90A-NEXT: v_add3_u32 v3, v3, v1, s6 +; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v1, v1 +; GFX90A-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc +; GFX90A-NEXT: v_lshrrev_b32_e32 v1, 16, v1 +; GFX90A-NEXT: v_and_or_b32 v1, v2, s7, v1 +; GFX90A-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 offset:65534 ; GFX90A-NEXT: s_waitcnt lgkmcnt(0) -; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1 +; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2 ; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5] -; GFX90A-NEXT: v_mov_b32_e32 v1, v2 ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX90A-NEXT: s_cbranch_execnz .LBB19_1 ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end @@ -5812,20 +5828,20 @@ define void @local_atomic_fadd_noret_bf16__offset__align4(ptr addrspace(3) %ptr) ; GFX908-NEXT: .LBB19_1: ; %atomicrmw.start ; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX908-NEXT: s_waitcnt lgkmcnt(0) -; GFX908-NEXT: v_lshlrev_b32_e32 v2, 16, v1 -; GFX908-NEXT: v_add_f32_e32 v2, 4.0, v2 -; GFX908-NEXT: v_bfe_u32 v3, v2, 16, 1 -; GFX908-NEXT: v_or_b32_e32 v4, 0x400000, v2 -; GFX908-NEXT: v_add3_u32 v3, v3, v2, s6 -; GFX908-NEXT: v_cmp_u_f32_e32 vcc, v2, v2 -; GFX908-NEXT: v_cndmask_b32_e32 v2, v3, v4, vcc -; GFX908-NEXT: v_lshrrev_b32_e32 v2, 16, v2 -; GFX908-NEXT: v_and_or_b32 v2, v1, s7, v2 -; GFX908-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 offset:65534 +; GFX908-NEXT: v_mov_b32_e32 v2, v1 +; GFX908-NEXT: v_lshlrev_b32_e32 v1, 16, v2 +; GFX908-NEXT: v_add_f32_e32 v1, 4.0, v1 +; GFX908-NEXT: v_bfe_u32 v3, v1, 16, 1 +; GFX908-NEXT: v_or_b32_e32 v4, 0x400000, v1 +; GFX908-NEXT: v_add3_u32 v3, v3, v1, s6 +; GFX908-NEXT: v_cmp_u_f32_e32 vcc, v1, v1 +; GFX908-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc +; GFX908-NEXT: v_lshrrev_b32_e32 v1, 16, v1 +; GFX908-NEXT: v_and_or_b32 v1, v2, s7, v1 +; GFX908-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 offset:65534 ; GFX908-NEXT: s_waitcnt lgkmcnt(0) -; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1 +; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2 ; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5] -; GFX908-NEXT: v_mov_b32_e32 v1, v2 ; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX908-NEXT: s_cbranch_execnz .LBB19_1 ; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end @@ -5841,21 +5857,21 @@ define void @local_atomic_fadd_noret_bf16__offset__align4(ptr addrspace(3) %ptr) ; GFX8-NEXT: .LBB19_1: ; %atomicrmw.start ; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX8-NEXT: s_waitcnt lgkmcnt(0) -; GFX8-NEXT: v_lshlrev_b32_e32 v2, 16, v1 -; GFX8-NEXT: v_add_f32_e32 v2, 4.0, v2 -; GFX8-NEXT: v_bfe_u32 v4, v2, 16, 1 -; GFX8-NEXT: v_add_u32_e32 v4, vcc, v4, v2 +; GFX8-NEXT: v_mov_b32_e32 v2, v1 +; GFX8-NEXT: v_lshlrev_b32_e32 v1, 16, v2 +; GFX8-NEXT: v_add_f32_e32 v1, 4.0, v1 +; GFX8-NEXT: v_bfe_u32 v4, v1, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v4, vcc, v4, v1 ; GFX8-NEXT: v_add_u32_e32 v4, vcc, 0x7fff, v4 -; GFX8-NEXT: v_or_b32_e32 v5, 0x400000, v2 -; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v2, v2 -; GFX8-NEXT: v_and_b32_e32 v3, 0xffff0000, v1 -; GFX8-NEXT: v_cndmask_b32_e32 v2, v4, v5, vcc -; GFX8-NEXT: v_or_b32_sdwa v2, v3, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1 -; GFX8-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 offset:65534 +; GFX8-NEXT: v_or_b32_e32 v5, 0x400000, v1 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v1, v1 +; GFX8-NEXT: v_and_b32_e32 v3, 0xffff0000, v2 +; GFX8-NEXT: 
v_cndmask_b32_e32 v1, v4, v5, vcc +; GFX8-NEXT: v_or_b32_sdwa v1, v3, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1 +; GFX8-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 offset:65534 ; GFX8-NEXT: s_waitcnt lgkmcnt(0) -; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1 +; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2 ; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5] -; GFX8-NEXT: v_mov_b32_e32 v1, v2 ; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX8-NEXT: s_cbranch_execnz .LBB19_1 ; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end @@ -5871,16 +5887,16 @@ define void @local_atomic_fadd_noret_bf16__offset__align4(ptr addrspace(3) %ptr) ; GFX7-NEXT: .LBB19_1: ; %atomicrmw.start ; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX7-NEXT: s_waitcnt lgkmcnt(0) -; GFX7-NEXT: v_lshlrev_b32_e32 v2, 16, v1 -; GFX7-NEXT: v_add_f32_e32 v2, 4.0, v2 -; GFX7-NEXT: v_and_b32_e32 v3, 0xffff0000, v1 -; GFX7-NEXT: v_lshrrev_b32_e32 v2, 16, v2 -; GFX7-NEXT: v_or_b32_e32 v2, v3, v2 -; GFX7-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 offset:65534 +; GFX7-NEXT: v_mov_b32_e32 v2, v1 +; GFX7-NEXT: v_lshlrev_b32_e32 v1, 16, v2 +; GFX7-NEXT: v_add_f32_e32 v1, 4.0, v1 +; GFX7-NEXT: v_and_b32_e32 v3, 0xffff0000, v2 +; GFX7-NEXT: v_lshrrev_b32_e32 v1, 16, v1 +; GFX7-NEXT: v_or_b32_e32 v1, v3, v1 +; GFX7-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 offset:65534 ; GFX7-NEXT: s_waitcnt lgkmcnt(0) -; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1 +; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2 ; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5] -; GFX7-NEXT: v_mov_b32_e32 v1, v2 ; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX7-NEXT: s_cbranch_execnz .LBB19_1 ; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end @@ -5897,16 +5913,16 @@ define void @local_atomic_fadd_noret_bf16__offset__align4(ptr addrspace(3) %ptr) ; GFX6-NEXT: .LBB19_1: ; %atomicrmw.start ; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX6-NEXT: s_waitcnt lgkmcnt(0) -; GFX6-NEXT: v_lshlrev_b32_e32 v2, 16, v1 -; GFX6-NEXT: v_add_f32_e32 v2, 4.0, v2 -; GFX6-NEXT: v_and_b32_e32 v3, 0xffff0000, v1 -; GFX6-NEXT: v_lshrrev_b32_e32 v2, 16, v2 -; GFX6-NEXT: v_or_b32_e32 v2, v3, v2 -; GFX6-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 +; GFX6-NEXT: v_mov_b32_e32 v2, v1 +; GFX6-NEXT: v_lshlrev_b32_e32 v1, 16, v2 +; GFX6-NEXT: v_add_f32_e32 v1, 4.0, v1 +; GFX6-NEXT: v_and_b32_e32 v3, 0xffff0000, v2 +; GFX6-NEXT: v_lshrrev_b32_e32 v1, 16, v1 +; GFX6-NEXT: v_or_b32_e32 v1, v3, v1 +; GFX6-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 ; GFX6-NEXT: s_waitcnt lgkmcnt(0) -; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1 +; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2 ; GFX6-NEXT: s_or_b64 s[4:5], vcc, s[4:5] -; GFX6-NEXT: v_mov_b32_e32 v1, v2 ; GFX6-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX6-NEXT: s_cbranch_execnz .LBB19_1 ; GFX6-NEXT: ; %bb.2: ; %atomicrmw.end @@ -6399,13 +6415,14 @@ define void @local_atomic_fadd_noret_v2f16(ptr addrspace(3) %ptr, <2 x half> %va ; GFX11-NEXT: .LBB22_1: ; %atomicrmw.start ; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX11-NEXT: s_waitcnt lgkmcnt(0) -; GFX11-NEXT: v_pk_add_f16 v3, v2, v1 +; GFX11-NEXT: v_mov_b32_e32 v3, v2 +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-NEXT: v_pk_add_f16 v2, v3, v1 ; GFX11-NEXT: s_waitcnt_vscnt null, 0x0 -; GFX11-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v2 +; GFX11-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v3 ; GFX11-NEXT: s_waitcnt lgkmcnt(0) ; GFX11-NEXT: buffer_gl0_inv -; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v2 -; GFX11-NEXT: v_mov_b32_e32 v2, v3 +; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3 ; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0 ; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) 
; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0 @@ -6422,13 +6439,13 @@ define void @local_atomic_fadd_noret_v2f16(ptr addrspace(3) %ptr, <2 x half> %va ; GFX10-NEXT: .LBB22_1: ; %atomicrmw.start ; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX10-NEXT: s_waitcnt lgkmcnt(0) -; GFX10-NEXT: v_pk_add_f16 v3, v2, v1 +; GFX10-NEXT: v_mov_b32_e32 v3, v2 +; GFX10-NEXT: v_pk_add_f16 v2, v3, v1 ; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 -; GFX10-NEXT: ds_cmpst_rtn_b32 v3, v0, v2, v3 +; GFX10-NEXT: ds_cmpst_rtn_b32 v2, v0, v3, v2 ; GFX10-NEXT: s_waitcnt lgkmcnt(0) ; GFX10-NEXT: buffer_gl0_inv -; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v2 -; GFX10-NEXT: v_mov_b32_e32 v2, v3 +; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3 ; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4 ; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4 ; GFX10-NEXT: s_cbranch_execnz .LBB22_1 @@ -6444,12 +6461,12 @@ define void @local_atomic_fadd_noret_v2f16(ptr addrspace(3) %ptr, <2 x half> %va ; GFX90A-NEXT: .LBB22_1: ; %atomicrmw.start ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX90A-NEXT: s_waitcnt lgkmcnt(0) -; GFX90A-NEXT: v_pk_add_f16 v3, v2, v1 -; GFX90A-NEXT: ds_cmpst_rtn_b32 v3, v0, v2, v3 +; GFX90A-NEXT: v_mov_b32_e32 v3, v2 +; GFX90A-NEXT: v_pk_add_f16 v2, v3, v1 +; GFX90A-NEXT: ds_cmpst_rtn_b32 v2, v0, v3, v2 ; GFX90A-NEXT: s_waitcnt lgkmcnt(0) -; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v3, v2 +; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3 ; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5] -; GFX90A-NEXT: v_mov_b32_e32 v2, v3 ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX90A-NEXT: s_cbranch_execnz .LBB22_1 ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end @@ -6464,12 +6481,12 @@ define void @local_atomic_fadd_noret_v2f16(ptr addrspace(3) %ptr, <2 x half> %va ; GFX908-NEXT: .LBB22_1: ; %atomicrmw.start ; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX908-NEXT: s_waitcnt lgkmcnt(0) -; GFX908-NEXT: v_pk_add_f16 v3, v2, v1 -; GFX908-NEXT: ds_cmpst_rtn_b32 v3, v0, v2, v3 +; GFX908-NEXT: v_mov_b32_e32 v3, v2 +; GFX908-NEXT: v_pk_add_f16 v2, v3, v1 +; GFX908-NEXT: ds_cmpst_rtn_b32 v2, v0, v3, v2 ; GFX908-NEXT: s_waitcnt lgkmcnt(0) -; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v3, v2 +; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3 ; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5] -; GFX908-NEXT: v_mov_b32_e32 v2, v3 ; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX908-NEXT: s_cbranch_execnz .LBB22_1 ; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end @@ -6485,14 +6502,14 @@ define void @local_atomic_fadd_noret_v2f16(ptr addrspace(3) %ptr, <2 x half> %va ; GFX8-NEXT: .LBB22_1: ; %atomicrmw.start ; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX8-NEXT: s_waitcnt lgkmcnt(0) -; GFX8-NEXT: v_add_f16_sdwa v3, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1 -; GFX8-NEXT: v_add_f16_e32 v4, v2, v1 -; GFX8-NEXT: v_or_b32_e32 v3, v4, v3 -; GFX8-NEXT: ds_cmpst_rtn_b32 v3, v0, v2, v3 +; GFX8-NEXT: v_mov_b32_e32 v3, v2 +; GFX8-NEXT: v_add_f16_sdwa v2, v3, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1 +; GFX8-NEXT: v_add_f16_e32 v4, v3, v1 +; GFX8-NEXT: v_or_b32_e32 v2, v4, v2 +; GFX8-NEXT: ds_cmpst_rtn_b32 v2, v0, v3, v2 ; GFX8-NEXT: s_waitcnt lgkmcnt(0) -; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v3, v2 +; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3 ; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5] -; GFX8-NEXT: v_mov_b32_e32 v2, v3 ; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX8-NEXT: s_cbranch_execnz .LBB22_1 ; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end @@ -6614,13 +6631,14 @@ define void @local_atomic_fadd_noret_v2f16__offset(ptr 
addrspace(3) %ptr, <2 x h ; GFX11-NEXT: .LBB23_1: ; %atomicrmw.start ; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX11-NEXT: s_waitcnt lgkmcnt(0) -; GFX11-NEXT: v_pk_add_f16 v3, v2, v1 +; GFX11-NEXT: v_mov_b32_e32 v3, v2 +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-NEXT: v_pk_add_f16 v2, v3, v1 ; GFX11-NEXT: s_waitcnt_vscnt null, 0x0 -; GFX11-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v2 offset:65532 +; GFX11-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v3 offset:65532 ; GFX11-NEXT: s_waitcnt lgkmcnt(0) ; GFX11-NEXT: buffer_gl0_inv -; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v2 -; GFX11-NEXT: v_mov_b32_e32 v2, v3 +; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3 ; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0 ; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0 @@ -6637,13 +6655,13 @@ define void @local_atomic_fadd_noret_v2f16__offset(ptr addrspace(3) %ptr, <2 x h ; GFX10-NEXT: .LBB23_1: ; %atomicrmw.start ; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX10-NEXT: s_waitcnt lgkmcnt(0) -; GFX10-NEXT: v_pk_add_f16 v3, v2, v1 +; GFX10-NEXT: v_mov_b32_e32 v3, v2 +; GFX10-NEXT: v_pk_add_f16 v2, v3, v1 ; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 -; GFX10-NEXT: ds_cmpst_rtn_b32 v3, v0, v2, v3 offset:65532 +; GFX10-NEXT: ds_cmpst_rtn_b32 v2, v0, v3, v2 offset:65532 ; GFX10-NEXT: s_waitcnt lgkmcnt(0) ; GFX10-NEXT: buffer_gl0_inv -; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v2 -; GFX10-NEXT: v_mov_b32_e32 v2, v3 +; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3 ; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4 ; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4 ; GFX10-NEXT: s_cbranch_execnz .LBB23_1 @@ -6659,12 +6677,12 @@ define void @local_atomic_fadd_noret_v2f16__offset(ptr addrspace(3) %ptr, <2 x h ; GFX90A-NEXT: .LBB23_1: ; %atomicrmw.start ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX90A-NEXT: s_waitcnt lgkmcnt(0) -; GFX90A-NEXT: v_pk_add_f16 v3, v2, v1 -; GFX90A-NEXT: ds_cmpst_rtn_b32 v3, v0, v2, v3 offset:65532 +; GFX90A-NEXT: v_mov_b32_e32 v3, v2 +; GFX90A-NEXT: v_pk_add_f16 v2, v3, v1 +; GFX90A-NEXT: ds_cmpst_rtn_b32 v2, v0, v3, v2 offset:65532 ; GFX90A-NEXT: s_waitcnt lgkmcnt(0) -; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v3, v2 +; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3 ; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5] -; GFX90A-NEXT: v_mov_b32_e32 v2, v3 ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX90A-NEXT: s_cbranch_execnz .LBB23_1 ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end @@ -6679,12 +6697,12 @@ define void @local_atomic_fadd_noret_v2f16__offset(ptr addrspace(3) %ptr, <2 x h ; GFX908-NEXT: .LBB23_1: ; %atomicrmw.start ; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX908-NEXT: s_waitcnt lgkmcnt(0) -; GFX908-NEXT: v_pk_add_f16 v3, v2, v1 -; GFX908-NEXT: ds_cmpst_rtn_b32 v3, v0, v2, v3 offset:65532 +; GFX908-NEXT: v_mov_b32_e32 v3, v2 +; GFX908-NEXT: v_pk_add_f16 v2, v3, v1 +; GFX908-NEXT: ds_cmpst_rtn_b32 v2, v0, v3, v2 offset:65532 ; GFX908-NEXT: s_waitcnt lgkmcnt(0) -; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v3, v2 +; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3 ; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5] -; GFX908-NEXT: v_mov_b32_e32 v2, v3 ; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX908-NEXT: s_cbranch_execnz .LBB23_1 ; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end @@ -6700,14 +6718,14 @@ define void @local_atomic_fadd_noret_v2f16__offset(ptr addrspace(3) %ptr, <2 x h ; GFX8-NEXT: .LBB23_1: ; %atomicrmw.start ; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX8-NEXT: s_waitcnt lgkmcnt(0) -; GFX8-NEXT: v_add_f16_sdwa v3, v2, v1 dst_sel:WORD_1 
dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1 -; GFX8-NEXT: v_add_f16_e32 v4, v2, v1 -; GFX8-NEXT: v_or_b32_e32 v3, v4, v3 -; GFX8-NEXT: ds_cmpst_rtn_b32 v3, v0, v2, v3 offset:65532 +; GFX8-NEXT: v_mov_b32_e32 v3, v2 +; GFX8-NEXT: v_add_f16_sdwa v2, v3, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1 +; GFX8-NEXT: v_add_f16_e32 v4, v3, v1 +; GFX8-NEXT: v_or_b32_e32 v2, v4, v2 +; GFX8-NEXT: ds_cmpst_rtn_b32 v2, v0, v3, v2 offset:65532 ; GFX8-NEXT: s_waitcnt lgkmcnt(0) -; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v3, v2 +; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3 ; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5] -; GFX8-NEXT: v_mov_b32_e32 v2, v3 ; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX8-NEXT: s_cbranch_execnz .LBB23_1 ; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end @@ -7547,30 +7565,32 @@ define void @local_atomic_fadd_noret_v2bf16(ptr addrspace(3) %ptr, <2 x bfloat> ; GFX11-TRUE16-NEXT: .LBB26_1: ; %atomicrmw.start ; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0) -; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v3 +; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v3 ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-TRUE16-NEXT: v_dual_add_f32 v5, v5, v2 :: v_dual_lshlrev_b32 v4, 16, v3 -; GFX11-TRUE16-NEXT: v_add_f32_e32 v4, v4, v1 +; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v4 +; GFX11-TRUE16-NEXT: v_add_f32_e32 v5, v5, v2 +; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v4 ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) ; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v5, 16, 1 -; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v4, 16, 1 -; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v4 -; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4 +; GFX11-TRUE16-NEXT: v_add_f32_e32 v3, v3, v1 ; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v5 +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) ; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff -; GFX11-TRUE16-NEXT: v_add3_u32 v6, v6, v4, 0x7fff -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_4) -; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v4, v6, v8, vcc_lo +; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v3, 16, 1 +; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v3 +; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3 +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX11-TRUE16-NEXT: v_add3_u32 v6, v6, v3, 0x7fff +; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v6, v8, vcc_lo ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5 ; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v4.h +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v3.h ; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0 -; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v5, v3 +; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v5, v4 ; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0) ; GFX11-TRUE16-NEXT: buffer_gl0_inv -; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3 -; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v4 +; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4 ; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0 ; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0 @@ -7591,30 +7611,32 @@ define void @local_atomic_fadd_noret_v2bf16(ptr addrspace(3) %ptr, <2 x bfloat> ; GFX11-FAKE16-NEXT: .LBB26_1: ; 
%atomicrmw.start ; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0) -; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v3 +; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v3 ; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-FAKE16-NEXT: v_dual_add_f32 v5, v5, v1 :: v_dual_lshlrev_b32 v4, 16, v3 -; GFX11-FAKE16-NEXT: v_add_f32_e32 v4, v4, v2 +; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v4 +; GFX11-FAKE16-NEXT: v_add_f32_e32 v5, v5, v1 +; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v4 ; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) ; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v5, 16, 1 -; GFX11-FAKE16-NEXT: v_bfe_u32 v6, v4, 16, 1 -; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v4 +; GFX11-FAKE16-NEXT: v_add_f32_e32 v3, v3, v2 ; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v5 ; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5 +; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4) ; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff -; GFX11-FAKE16-NEXT: v_add3_u32 v6, v6, v4, 0x7fff -; GFX11-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v4, v4 -; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2) +; GFX11-FAKE16-NEXT: v_bfe_u32 v6, v3, 16, 1 +; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v3 +; GFX11-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v3, v3 +; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4) ; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo -; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v4, v6, v8, s0 -; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX11-FAKE16-NEXT: v_perm_b32 v4, v5, v4, 0x7060302 +; GFX11-FAKE16-NEXT: v_add3_u32 v6, v6, v3, 0x7fff +; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v3, v6, v8, s0 +; GFX11-FAKE16-NEXT: v_perm_b32 v3, v5, v3, 0x7060302 ; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0 -; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3 +; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4 ; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0) ; GFX11-FAKE16-NEXT: buffer_gl0_inv -; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3 -; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v4 +; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4 ; GFX11-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1 ; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1 @@ -7634,27 +7656,27 @@ define void @local_atomic_fadd_noret_v2bf16(ptr addrspace(3) %ptr, <2 x bfloat> ; GFX10-NEXT: .LBB26_1: ; %atomicrmw.start ; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX10-NEXT: s_waitcnt lgkmcnt(0) -; GFX10-NEXT: v_lshlrev_b32_e32 v4, 16, v3 -; GFX10-NEXT: v_and_b32_e32 v5, 0xffff0000, v3 -; GFX10-NEXT: v_add_f32_e32 v4, v4, v2 +; GFX10-NEXT: v_mov_b32_e32 v4, v3 +; GFX10-NEXT: v_lshlrev_b32_e32 v3, 16, v4 +; GFX10-NEXT: v_and_b32_e32 v5, 0xffff0000, v4 +; GFX10-NEXT: v_add_f32_e32 v3, v3, v2 ; GFX10-NEXT: v_add_f32_e32 v5, v5, v1 -; GFX10-NEXT: v_bfe_u32 v6, v4, 16, 1 +; GFX10-NEXT: v_bfe_u32 v6, v3, 16, 1 ; GFX10-NEXT: v_bfe_u32 v7, v5, 16, 1 -; GFX10-NEXT: v_or_b32_e32 v8, 0x400000, v4 +; GFX10-NEXT: v_or_b32_e32 v8, 0x400000, v3 ; GFX10-NEXT: v_or_b32_e32 v9, 0x400000, v5 ; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5 -; GFX10-NEXT: v_add3_u32 v6, v6, v4, 0x7fff +; GFX10-NEXT: v_add3_u32 v6, v6, v3, 0x7fff ; GFX10-NEXT: v_add3_u32 
v7, v7, v5, 0x7fff -; GFX10-NEXT: v_cmp_u_f32_e64 s4, v4, v4 +; GFX10-NEXT: v_cmp_u_f32_e64 s4, v3, v3 ; GFX10-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo -; GFX10-NEXT: v_cndmask_b32_e64 v4, v6, v8, s4 -; GFX10-NEXT: v_perm_b32 v4, v5, v4, 0x7060302 +; GFX10-NEXT: v_cndmask_b32_e64 v3, v6, v8, s4 +; GFX10-NEXT: v_perm_b32 v3, v5, v3, 0x7060302 ; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 -; GFX10-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4 +; GFX10-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3 ; GFX10-NEXT: s_waitcnt lgkmcnt(0) ; GFX10-NEXT: buffer_gl0_inv -; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3 -; GFX10-NEXT: v_mov_b32_e32 v3, v4 +; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4 ; GFX10-NEXT: s_or_b32 s5, vcc_lo, s5 ; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s5 ; GFX10-NEXT: s_cbranch_execnz .LBB26_1 @@ -7674,26 +7696,26 @@ define void @local_atomic_fadd_noret_v2bf16(ptr addrspace(3) %ptr, <2 x bfloat> ; GFX90A-NEXT: .LBB26_1: ; %atomicrmw.start ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX90A-NEXT: s_waitcnt lgkmcnt(0) -; GFX90A-NEXT: v_lshlrev_b32_e32 v4, 16, v3 -; GFX90A-NEXT: v_and_b32_e32 v5, 0xffff0000, v3 -; GFX90A-NEXT: v_add_f32_e32 v4, v4, v2 +; GFX90A-NEXT: v_mov_b32_e32 v4, v3 +; GFX90A-NEXT: v_lshlrev_b32_e32 v3, 16, v4 +; GFX90A-NEXT: v_and_b32_e32 v5, 0xffff0000, v4 +; GFX90A-NEXT: v_add_f32_e32 v3, v3, v2 ; GFX90A-NEXT: v_add_f32_e32 v5, v5, v1 -; GFX90A-NEXT: v_bfe_u32 v6, v4, 16, 1 +; GFX90A-NEXT: v_bfe_u32 v6, v3, 16, 1 ; GFX90A-NEXT: v_bfe_u32 v8, v5, 16, 1 -; GFX90A-NEXT: v_or_b32_e32 v7, 0x400000, v4 +; GFX90A-NEXT: v_or_b32_e32 v7, 0x400000, v3 ; GFX90A-NEXT: v_or_b32_e32 v9, 0x400000, v5 -; GFX90A-NEXT: v_add3_u32 v6, v6, v4, s8 +; GFX90A-NEXT: v_add3_u32 v6, v6, v3, s8 ; GFX90A-NEXT: v_add3_u32 v8, v8, v5, s8 ; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v5, v5 -; GFX90A-NEXT: v_cmp_u_f32_e64 s[4:5], v4, v4 -; GFX90A-NEXT: v_cndmask_b32_e64 v4, v6, v7, s[4:5] +; GFX90A-NEXT: v_cmp_u_f32_e64 s[4:5], v3, v3 +; GFX90A-NEXT: v_cndmask_b32_e64 v3, v6, v7, s[4:5] ; GFX90A-NEXT: v_cndmask_b32_e32 v5, v8, v9, vcc -; GFX90A-NEXT: v_perm_b32 v4, v5, v4, s9 -; GFX90A-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4 +; GFX90A-NEXT: v_perm_b32 v3, v5, v3, s9 +; GFX90A-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3 ; GFX90A-NEXT: s_waitcnt lgkmcnt(0) -; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3 +; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4 ; GFX90A-NEXT: s_or_b64 s[6:7], vcc, s[6:7] -; GFX90A-NEXT: v_mov_b32_e32 v3, v4 ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[6:7] ; GFX90A-NEXT: s_cbranch_execnz .LBB26_1 ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end @@ -7712,26 +7734,26 @@ define void @local_atomic_fadd_noret_v2bf16(ptr addrspace(3) %ptr, <2 x bfloat> ; GFX908-NEXT: .LBB26_1: ; %atomicrmw.start ; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX908-NEXT: s_waitcnt lgkmcnt(0) -; GFX908-NEXT: v_lshlrev_b32_e32 v4, 16, v3 -; GFX908-NEXT: v_and_b32_e32 v5, 0xffff0000, v3 -; GFX908-NEXT: v_add_f32_e32 v4, v4, v2 +; GFX908-NEXT: v_mov_b32_e32 v4, v3 +; GFX908-NEXT: v_lshlrev_b32_e32 v3, 16, v4 +; GFX908-NEXT: v_and_b32_e32 v5, 0xffff0000, v4 +; GFX908-NEXT: v_add_f32_e32 v3, v3, v2 ; GFX908-NEXT: v_add_f32_e32 v5, v5, v1 -; GFX908-NEXT: v_bfe_u32 v6, v4, 16, 1 +; GFX908-NEXT: v_bfe_u32 v6, v3, 16, 1 ; GFX908-NEXT: v_bfe_u32 v8, v5, 16, 1 -; GFX908-NEXT: v_or_b32_e32 v7, 0x400000, v4 +; GFX908-NEXT: v_or_b32_e32 v7, 0x400000, v3 ; GFX908-NEXT: v_or_b32_e32 v9, 0x400000, v5 -; GFX908-NEXT: v_add3_u32 v6, v6, v4, s8 +; GFX908-NEXT: v_add3_u32 v6, v6, v3, s8 ; GFX908-NEXT: v_add3_u32 v8, v8, v5, s8 ; GFX908-NEXT: 
v_cmp_u_f32_e32 vcc, v5, v5 -; GFX908-NEXT: v_cmp_u_f32_e64 s[4:5], v4, v4 -; GFX908-NEXT: v_cndmask_b32_e64 v4, v6, v7, s[4:5] +; GFX908-NEXT: v_cmp_u_f32_e64 s[4:5], v3, v3 +; GFX908-NEXT: v_cndmask_b32_e64 v3, v6, v7, s[4:5] ; GFX908-NEXT: v_cndmask_b32_e32 v5, v8, v9, vcc -; GFX908-NEXT: v_perm_b32 v4, v5, v4, s9 -; GFX908-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4 +; GFX908-NEXT: v_perm_b32 v3, v5, v3, s9 +; GFX908-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3 ; GFX908-NEXT: s_waitcnt lgkmcnt(0) -; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3 +; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4 ; GFX908-NEXT: s_or_b64 s[6:7], vcc, s[6:7] -; GFX908-NEXT: v_mov_b32_e32 v3, v4 ; GFX908-NEXT: s_andn2_b64 exec, exec, s[6:7] ; GFX908-NEXT: s_cbranch_execnz .LBB26_1 ; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end @@ -7749,29 +7771,29 @@ define void @local_atomic_fadd_noret_v2bf16(ptr addrspace(3) %ptr, <2 x bfloat> ; GFX8-NEXT: .LBB26_1: ; %atomicrmw.start ; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX8-NEXT: s_waitcnt lgkmcnt(0) -; GFX8-NEXT: v_lshlrev_b32_e32 v4, 16, v3 -; GFX8-NEXT: v_and_b32_e32 v5, 0xffff0000, v3 -; GFX8-NEXT: v_add_f32_e32 v4, v4, v2 +; GFX8-NEXT: v_mov_b32_e32 v4, v3 +; GFX8-NEXT: v_lshlrev_b32_e32 v3, 16, v4 +; GFX8-NEXT: v_and_b32_e32 v5, 0xffff0000, v4 +; GFX8-NEXT: v_add_f32_e32 v3, v3, v2 ; GFX8-NEXT: v_add_f32_e32 v5, v5, v1 -; GFX8-NEXT: v_bfe_u32 v6, v4, 16, 1 +; GFX8-NEXT: v_bfe_u32 v6, v3, 16, 1 ; GFX8-NEXT: v_bfe_u32 v8, v5, 16, 1 -; GFX8-NEXT: v_add_u32_e32 v6, vcc, v6, v4 +; GFX8-NEXT: v_add_u32_e32 v6, vcc, v6, v3 ; GFX8-NEXT: v_add_u32_e32 v8, vcc, v8, v5 ; GFX8-NEXT: v_add_u32_e32 v6, vcc, 0x7fff, v6 ; GFX8-NEXT: v_add_u32_e32 v8, vcc, 0x7fff, v8 ; GFX8-NEXT: v_or_b32_e32 v9, 0x400000, v5 ; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v5, v5 -; GFX8-NEXT: v_or_b32_e32 v7, 0x400000, v4 -; GFX8-NEXT: v_cmp_u_f32_e64 s[4:5], v4, v4 +; GFX8-NEXT: v_or_b32_e32 v7, 0x400000, v3 +; GFX8-NEXT: v_cmp_u_f32_e64 s[4:5], v3, v3 ; GFX8-NEXT: v_cndmask_b32_e32 v5, v8, v9, vcc -; GFX8-NEXT: v_cndmask_b32_e64 v4, v6, v7, s[4:5] +; GFX8-NEXT: v_cndmask_b32_e64 v3, v6, v7, s[4:5] ; GFX8-NEXT: v_lshrrev_b32_e32 v5, 16, v5 -; GFX8-NEXT: v_alignbit_b32 v4, v5, v4, 16 -; GFX8-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4 +; GFX8-NEXT: v_alignbit_b32 v3, v5, v3, 16 +; GFX8-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3 ; GFX8-NEXT: s_waitcnt lgkmcnt(0) -; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3 +; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4 ; GFX8-NEXT: s_or_b64 s[6:7], vcc, s[6:7] -; GFX8-NEXT: v_mov_b32_e32 v3, v4 ; GFX8-NEXT: s_andn2_b64 exec, exec, s[6:7] ; GFX8-NEXT: s_cbranch_execnz .LBB26_1 ; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end @@ -7888,30 +7910,32 @@ define void @local_atomic_fadd_noret_v2bf16__ofset(ptr addrspace(3) %ptr, <2 x b ; GFX11-TRUE16-NEXT: .LBB27_1: ; %atomicrmw.start ; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0) -; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v3 +; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v3 ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-TRUE16-NEXT: v_dual_add_f32 v5, v5, v2 :: v_dual_lshlrev_b32 v4, 16, v3 -; GFX11-TRUE16-NEXT: v_add_f32_e32 v4, v4, v1 +; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v4 +; GFX11-TRUE16-NEXT: v_add_f32_e32 v5, v5, v2 +; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v4 ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) ; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v5, 16, 1 -; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v4, 16, 1 -; 
GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v4 -; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4 +; GFX11-TRUE16-NEXT: v_add_f32_e32 v3, v3, v1 ; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v5 +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) ; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff -; GFX11-TRUE16-NEXT: v_add3_u32 v6, v6, v4, 0x7fff -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_4) -; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v4, v6, v8, vcc_lo +; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v3, 16, 1 +; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v3 +; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3 +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX11-TRUE16-NEXT: v_add3_u32 v6, v6, v3, 0x7fff +; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v6, v8, vcc_lo ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5 ; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v4.h +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v3.h ; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0 -; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v5, v3 offset:65532 +; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v5, v4 offset:65532 ; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0) ; GFX11-TRUE16-NEXT: buffer_gl0_inv -; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3 -; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v4 +; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4 ; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0 ; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0 @@ -7932,30 +7956,32 @@ define void @local_atomic_fadd_noret_v2bf16__ofset(ptr addrspace(3) %ptr, <2 x b ; GFX11-FAKE16-NEXT: .LBB27_1: ; %atomicrmw.start ; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0) -; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v3 +; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v3 ; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-FAKE16-NEXT: v_dual_add_f32 v5, v5, v1 :: v_dual_lshlrev_b32 v4, 16, v3 -; GFX11-FAKE16-NEXT: v_add_f32_e32 v4, v4, v2 +; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v4 +; GFX11-FAKE16-NEXT: v_add_f32_e32 v5, v5, v1 +; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v4 ; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) ; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v5, 16, 1 -; GFX11-FAKE16-NEXT: v_bfe_u32 v6, v4, 16, 1 -; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v4 +; GFX11-FAKE16-NEXT: v_add_f32_e32 v3, v3, v2 ; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v5 ; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5 +; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4) ; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff -; GFX11-FAKE16-NEXT: v_add3_u32 v6, v6, v4, 0x7fff -; GFX11-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v4, v4 -; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2) +; GFX11-FAKE16-NEXT: v_bfe_u32 v6, v3, 16, 1 +; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v3 +; GFX11-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v3, v3 +; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4) ; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo -; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v4, v6, v8, s0 -; 
GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX11-FAKE16-NEXT: v_perm_b32 v4, v5, v4, 0x7060302 +; GFX11-FAKE16-NEXT: v_add3_u32 v6, v6, v3, 0x7fff +; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v3, v6, v8, s0 +; GFX11-FAKE16-NEXT: v_perm_b32 v3, v5, v3, 0x7060302 ; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0 -; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3 offset:65532 +; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4 offset:65532 ; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0) ; GFX11-FAKE16-NEXT: buffer_gl0_inv -; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3 -; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v4 +; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4 ; GFX11-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1 ; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1 @@ -7975,27 +8001,27 @@ define void @local_atomic_fadd_noret_v2bf16__ofset(ptr addrspace(3) %ptr, <2 x b ; GFX10-NEXT: .LBB27_1: ; %atomicrmw.start ; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX10-NEXT: s_waitcnt lgkmcnt(0) -; GFX10-NEXT: v_lshlrev_b32_e32 v4, 16, v3 -; GFX10-NEXT: v_and_b32_e32 v5, 0xffff0000, v3 -; GFX10-NEXT: v_add_f32_e32 v4, v4, v2 +; GFX10-NEXT: v_mov_b32_e32 v4, v3 +; GFX10-NEXT: v_lshlrev_b32_e32 v3, 16, v4 +; GFX10-NEXT: v_and_b32_e32 v5, 0xffff0000, v4 +; GFX10-NEXT: v_add_f32_e32 v3, v3, v2 ; GFX10-NEXT: v_add_f32_e32 v5, v5, v1 -; GFX10-NEXT: v_bfe_u32 v6, v4, 16, 1 +; GFX10-NEXT: v_bfe_u32 v6, v3, 16, 1 ; GFX10-NEXT: v_bfe_u32 v7, v5, 16, 1 -; GFX10-NEXT: v_or_b32_e32 v8, 0x400000, v4 +; GFX10-NEXT: v_or_b32_e32 v8, 0x400000, v3 ; GFX10-NEXT: v_or_b32_e32 v9, 0x400000, v5 ; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5 -; GFX10-NEXT: v_add3_u32 v6, v6, v4, 0x7fff +; GFX10-NEXT: v_add3_u32 v6, v6, v3, 0x7fff ; GFX10-NEXT: v_add3_u32 v7, v7, v5, 0x7fff -; GFX10-NEXT: v_cmp_u_f32_e64 s4, v4, v4 +; GFX10-NEXT: v_cmp_u_f32_e64 s4, v3, v3 ; GFX10-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo -; GFX10-NEXT: v_cndmask_b32_e64 v4, v6, v8, s4 -; GFX10-NEXT: v_perm_b32 v4, v5, v4, 0x7060302 +; GFX10-NEXT: v_cndmask_b32_e64 v3, v6, v8, s4 +; GFX10-NEXT: v_perm_b32 v3, v5, v3, 0x7060302 ; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 -; GFX10-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4 offset:65532 +; GFX10-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3 offset:65532 ; GFX10-NEXT: s_waitcnt lgkmcnt(0) ; GFX10-NEXT: buffer_gl0_inv -; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3 -; GFX10-NEXT: v_mov_b32_e32 v3, v4 +; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4 ; GFX10-NEXT: s_or_b32 s5, vcc_lo, s5 ; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s5 ; GFX10-NEXT: s_cbranch_execnz .LBB27_1 @@ -8015,26 +8041,26 @@ define void @local_atomic_fadd_noret_v2bf16__ofset(ptr addrspace(3) %ptr, <2 x b ; GFX90A-NEXT: .LBB27_1: ; %atomicrmw.start ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX90A-NEXT: s_waitcnt lgkmcnt(0) -; GFX90A-NEXT: v_lshlrev_b32_e32 v4, 16, v3 -; GFX90A-NEXT: v_and_b32_e32 v5, 0xffff0000, v3 -; GFX90A-NEXT: v_add_f32_e32 v4, v4, v2 +; GFX90A-NEXT: v_mov_b32_e32 v4, v3 +; GFX90A-NEXT: v_lshlrev_b32_e32 v3, 16, v4 +; GFX90A-NEXT: v_and_b32_e32 v5, 0xffff0000, v4 +; GFX90A-NEXT: v_add_f32_e32 v3, v3, v2 ; GFX90A-NEXT: v_add_f32_e32 v5, v5, v1 -; GFX90A-NEXT: v_bfe_u32 v6, v4, 16, 1 +; GFX90A-NEXT: v_bfe_u32 v6, v3, 16, 1 ; GFX90A-NEXT: v_bfe_u32 v8, v5, 16, 1 -; GFX90A-NEXT: v_or_b32_e32 v7, 0x400000, v4 +; GFX90A-NEXT: v_or_b32_e32 v7, 0x400000, v3 ; GFX90A-NEXT: 
v_or_b32_e32 v9, 0x400000, v5 -; GFX90A-NEXT: v_add3_u32 v6, v6, v4, s8 +; GFX90A-NEXT: v_add3_u32 v6, v6, v3, s8 ; GFX90A-NEXT: v_add3_u32 v8, v8, v5, s8 ; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v5, v5 -; GFX90A-NEXT: v_cmp_u_f32_e64 s[4:5], v4, v4 -; GFX90A-NEXT: v_cndmask_b32_e64 v4, v6, v7, s[4:5] +; GFX90A-NEXT: v_cmp_u_f32_e64 s[4:5], v3, v3 +; GFX90A-NEXT: v_cndmask_b32_e64 v3, v6, v7, s[4:5] ; GFX90A-NEXT: v_cndmask_b32_e32 v5, v8, v9, vcc -; GFX90A-NEXT: v_perm_b32 v4, v5, v4, s9 -; GFX90A-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4 offset:65532 +; GFX90A-NEXT: v_perm_b32 v3, v5, v3, s9 +; GFX90A-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3 offset:65532 ; GFX90A-NEXT: s_waitcnt lgkmcnt(0) -; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3 +; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4 ; GFX90A-NEXT: s_or_b64 s[6:7], vcc, s[6:7] -; GFX90A-NEXT: v_mov_b32_e32 v3, v4 ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[6:7] ; GFX90A-NEXT: s_cbranch_execnz .LBB27_1 ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end @@ -8053,26 +8079,26 @@ define void @local_atomic_fadd_noret_v2bf16__ofset(ptr addrspace(3) %ptr, <2 x b ; GFX908-NEXT: .LBB27_1: ; %atomicrmw.start ; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX908-NEXT: s_waitcnt lgkmcnt(0) -; GFX908-NEXT: v_lshlrev_b32_e32 v4, 16, v3 -; GFX908-NEXT: v_and_b32_e32 v5, 0xffff0000, v3 -; GFX908-NEXT: v_add_f32_e32 v4, v4, v2 +; GFX908-NEXT: v_mov_b32_e32 v4, v3 +; GFX908-NEXT: v_lshlrev_b32_e32 v3, 16, v4 +; GFX908-NEXT: v_and_b32_e32 v5, 0xffff0000, v4 +; GFX908-NEXT: v_add_f32_e32 v3, v3, v2 ; GFX908-NEXT: v_add_f32_e32 v5, v5, v1 -; GFX908-NEXT: v_bfe_u32 v6, v4, 16, 1 +; GFX908-NEXT: v_bfe_u32 v6, v3, 16, 1 ; GFX908-NEXT: v_bfe_u32 v8, v5, 16, 1 -; GFX908-NEXT: v_or_b32_e32 v7, 0x400000, v4 +; GFX908-NEXT: v_or_b32_e32 v7, 0x400000, v3 ; GFX908-NEXT: v_or_b32_e32 v9, 0x400000, v5 -; GFX908-NEXT: v_add3_u32 v6, v6, v4, s8 +; GFX908-NEXT: v_add3_u32 v6, v6, v3, s8 ; GFX908-NEXT: v_add3_u32 v8, v8, v5, s8 ; GFX908-NEXT: v_cmp_u_f32_e32 vcc, v5, v5 -; GFX908-NEXT: v_cmp_u_f32_e64 s[4:5], v4, v4 -; GFX908-NEXT: v_cndmask_b32_e64 v4, v6, v7, s[4:5] +; GFX908-NEXT: v_cmp_u_f32_e64 s[4:5], v3, v3 +; GFX908-NEXT: v_cndmask_b32_e64 v3, v6, v7, s[4:5] ; GFX908-NEXT: v_cndmask_b32_e32 v5, v8, v9, vcc -; GFX908-NEXT: v_perm_b32 v4, v5, v4, s9 -; GFX908-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4 offset:65532 +; GFX908-NEXT: v_perm_b32 v3, v5, v3, s9 +; GFX908-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3 offset:65532 ; GFX908-NEXT: s_waitcnt lgkmcnt(0) -; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3 +; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4 ; GFX908-NEXT: s_or_b64 s[6:7], vcc, s[6:7] -; GFX908-NEXT: v_mov_b32_e32 v3, v4 ; GFX908-NEXT: s_andn2_b64 exec, exec, s[6:7] ; GFX908-NEXT: s_cbranch_execnz .LBB27_1 ; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end @@ -8090,29 +8116,29 @@ define void @local_atomic_fadd_noret_v2bf16__ofset(ptr addrspace(3) %ptr, <2 x b ; GFX8-NEXT: .LBB27_1: ; %atomicrmw.start ; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX8-NEXT: s_waitcnt lgkmcnt(0) -; GFX8-NEXT: v_lshlrev_b32_e32 v4, 16, v3 -; GFX8-NEXT: v_and_b32_e32 v5, 0xffff0000, v3 -; GFX8-NEXT: v_add_f32_e32 v4, v4, v2 +; GFX8-NEXT: v_mov_b32_e32 v4, v3 +; GFX8-NEXT: v_lshlrev_b32_e32 v3, 16, v4 +; GFX8-NEXT: v_and_b32_e32 v5, 0xffff0000, v4 +; GFX8-NEXT: v_add_f32_e32 v3, v3, v2 ; GFX8-NEXT: v_add_f32_e32 v5, v5, v1 -; GFX8-NEXT: v_bfe_u32 v6, v4, 16, 1 +; GFX8-NEXT: v_bfe_u32 v6, v3, 16, 1 ; GFX8-NEXT: v_bfe_u32 v8, v5, 16, 1 -; GFX8-NEXT: v_add_u32_e32 v6, vcc, v6, v4 +; GFX8-NEXT: v_add_u32_e32 v6, vcc, v6, v3 ; 
GFX8-NEXT: v_add_u32_e32 v8, vcc, v8, v5 ; GFX8-NEXT: v_add_u32_e32 v6, vcc, 0x7fff, v6 ; GFX8-NEXT: v_add_u32_e32 v8, vcc, 0x7fff, v8 ; GFX8-NEXT: v_or_b32_e32 v9, 0x400000, v5 ; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v5, v5 -; GFX8-NEXT: v_or_b32_e32 v7, 0x400000, v4 -; GFX8-NEXT: v_cmp_u_f32_e64 s[4:5], v4, v4 +; GFX8-NEXT: v_or_b32_e32 v7, 0x400000, v3 +; GFX8-NEXT: v_cmp_u_f32_e64 s[4:5], v3, v3 ; GFX8-NEXT: v_cndmask_b32_e32 v5, v8, v9, vcc -; GFX8-NEXT: v_cndmask_b32_e64 v4, v6, v7, s[4:5] +; GFX8-NEXT: v_cndmask_b32_e64 v3, v6, v7, s[4:5] ; GFX8-NEXT: v_lshrrev_b32_e32 v5, 16, v5 -; GFX8-NEXT: v_alignbit_b32 v4, v5, v4, 16 -; GFX8-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4 offset:65532 +; GFX8-NEXT: v_alignbit_b32 v3, v5, v3, 16 +; GFX8-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3 offset:65532 ; GFX8-NEXT: s_waitcnt lgkmcnt(0) -; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3 +; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4 ; GFX8-NEXT: s_or_b64 s[6:7], vcc, s[6:7] -; GFX8-NEXT: v_mov_b32_e32 v3, v4 ; GFX8-NEXT: s_andn2_b64 exec, exec, s[6:7] ; GFX8-NEXT: s_cbranch_execnz .LBB27_1 ; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end @@ -8849,20 +8875,20 @@ define amdgpu_kernel void @local_ds_fadd(ptr addrspace(1) %out, ptr addrspace(3) ; GFX7-NEXT: ; %bb.5: ; GFX7-NEXT: s_lshl_b32 s0, s3, 4 ; GFX7-NEXT: v_mov_b32_e32 v1, s0 -; GFX7-NEXT: ds_read_b32 v3, v1 +; GFX7-NEXT: ds_read_b32 v2, v1 ; GFX7-NEXT: s_bcnt1_i32_b64 s0, s[8:9] -; GFX7-NEXT: v_cvt_f32_ubyte0_e32 v2, s0 -; GFX7-NEXT: v_mul_f32_e32 v2, 0x42280000, v2 +; GFX7-NEXT: v_cvt_f32_ubyte0_e32 v3, s0 +; GFX7-NEXT: v_mul_f32_e32 v3, 0x42280000, v3 ; GFX7-NEXT: s_mov_b64 s[8:9], 0 ; GFX7-NEXT: .LBB28_6: ; %atomicrmw.start2 ; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX7-NEXT: s_waitcnt lgkmcnt(0) -; GFX7-NEXT: v_add_f32_e32 v4, v3, v2 -; GFX7-NEXT: ds_cmpst_rtn_b32 v4, v1, v3, v4 +; GFX7-NEXT: v_mov_b32_e32 v4, v2 +; GFX7-NEXT: v_add_f32_e32 v2, v4, v3 +; GFX7-NEXT: ds_cmpst_rtn_b32 v2, v1, v4, v2 ; GFX7-NEXT: s_waitcnt lgkmcnt(0) -; GFX7-NEXT: v_cmp_eq_u32_e64 s[0:1], v4, v3 +; GFX7-NEXT: v_cmp_eq_u32_e64 s[0:1], v2, v4 ; GFX7-NEXT: s_or_b64 s[8:9], s[0:1], s[8:9] -; GFX7-NEXT: v_mov_b32_e32 v3, v4 ; GFX7-NEXT: s_andn2_b64 exec, exec, s[8:9] ; GFX7-NEXT: s_cbranch_execnz .LBB28_6 ; GFX7-NEXT: .LBB28_7: ; %Flow21 @@ -8973,20 +8999,20 @@ define amdgpu_kernel void @local_ds_fadd(ptr addrspace(1) %out, ptr addrspace(3) ; GFX6-NEXT: ; %bb.5: ; GFX6-NEXT: s_lshl_b32 s0, s3, 4 ; GFX6-NEXT: v_mov_b32_e32 v1, s0 -; GFX6-NEXT: ds_read_b32 v3, v1 +; GFX6-NEXT: ds_read_b32 v2, v1 ; GFX6-NEXT: s_bcnt1_i32_b64 s0, s[8:9] -; GFX6-NEXT: v_cvt_f32_ubyte0_e32 v2, s0 -; GFX6-NEXT: v_mul_f32_e32 v2, 0x42280000, v2 +; GFX6-NEXT: v_cvt_f32_ubyte0_e32 v3, s0 +; GFX6-NEXT: v_mul_f32_e32 v3, 0x42280000, v3 ; GFX6-NEXT: s_mov_b64 s[8:9], 0 ; GFX6-NEXT: .LBB28_6: ; %atomicrmw.start2 ; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX6-NEXT: s_waitcnt lgkmcnt(0) -; GFX6-NEXT: v_add_f32_e32 v4, v3, v2 -; GFX6-NEXT: ds_cmpst_rtn_b32 v4, v1, v3, v4 +; GFX6-NEXT: v_mov_b32_e32 v4, v2 +; GFX6-NEXT: v_add_f32_e32 v2, v4, v3 +; GFX6-NEXT: ds_cmpst_rtn_b32 v2, v1, v4, v2 ; GFX6-NEXT: s_waitcnt lgkmcnt(0) -; GFX6-NEXT: v_cmp_eq_u32_e64 s[0:1], v4, v3 +; GFX6-NEXT: v_cmp_eq_u32_e64 s[0:1], v2, v4 ; GFX6-NEXT: s_or_b64 s[8:9], s[0:1], s[8:9] -; GFX6-NEXT: v_mov_b32_e32 v3, v4 ; GFX6-NEXT: s_andn2_b64 exec, exec, s[8:9] ; GFX6-NEXT: s_cbranch_execnz .LBB28_6 ; GFX6-NEXT: .LBB28_7: ; %Flow19 @@ -9677,20 +9703,20 @@ define amdgpu_kernel void @local_ds_fadd_one_as(ptr addrspace(1) %out, ptr addrs ; 
GFX7-NEXT: ; %bb.5: ; GFX7-NEXT: s_lshl_b32 s0, s3, 4 ; GFX7-NEXT: v_mov_b32_e32 v1, s0 -; GFX7-NEXT: ds_read_b32 v3, v1 +; GFX7-NEXT: ds_read_b32 v2, v1 ; GFX7-NEXT: s_bcnt1_i32_b64 s0, s[8:9] -; GFX7-NEXT: v_cvt_f32_ubyte0_e32 v2, s0 -; GFX7-NEXT: v_mul_f32_e32 v2, 0x42280000, v2 +; GFX7-NEXT: v_cvt_f32_ubyte0_e32 v3, s0 +; GFX7-NEXT: v_mul_f32_e32 v3, 0x42280000, v3 ; GFX7-NEXT: s_mov_b64 s[8:9], 0 ; GFX7-NEXT: .LBB29_6: ; %atomicrmw.start2 ; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX7-NEXT: s_waitcnt lgkmcnt(0) -; GFX7-NEXT: v_add_f32_e32 v4, v3, v2 -; GFX7-NEXT: ds_cmpst_rtn_b32 v4, v1, v3, v4 +; GFX7-NEXT: v_mov_b32_e32 v4, v2 +; GFX7-NEXT: v_add_f32_e32 v2, v4, v3 +; GFX7-NEXT: ds_cmpst_rtn_b32 v2, v1, v4, v2 ; GFX7-NEXT: s_waitcnt lgkmcnt(0) -; GFX7-NEXT: v_cmp_eq_u32_e64 s[0:1], v4, v3 +; GFX7-NEXT: v_cmp_eq_u32_e64 s[0:1], v2, v4 ; GFX7-NEXT: s_or_b64 s[8:9], s[0:1], s[8:9] -; GFX7-NEXT: v_mov_b32_e32 v3, v4 ; GFX7-NEXT: s_andn2_b64 exec, exec, s[8:9] ; GFX7-NEXT: s_cbranch_execnz .LBB29_6 ; GFX7-NEXT: .LBB29_7: ; %Flow21 @@ -9801,20 +9827,20 @@ define amdgpu_kernel void @local_ds_fadd_one_as(ptr addrspace(1) %out, ptr addrs ; GFX6-NEXT: ; %bb.5: ; GFX6-NEXT: s_lshl_b32 s0, s3, 4 ; GFX6-NEXT: v_mov_b32_e32 v1, s0 -; GFX6-NEXT: ds_read_b32 v3, v1 +; GFX6-NEXT: ds_read_b32 v2, v1 ; GFX6-NEXT: s_bcnt1_i32_b64 s0, s[8:9] -; GFX6-NEXT: v_cvt_f32_ubyte0_e32 v2, s0 -; GFX6-NEXT: v_mul_f32_e32 v2, 0x42280000, v2 +; GFX6-NEXT: v_cvt_f32_ubyte0_e32 v3, s0 +; GFX6-NEXT: v_mul_f32_e32 v3, 0x42280000, v3 ; GFX6-NEXT: s_mov_b64 s[8:9], 0 ; GFX6-NEXT: .LBB29_6: ; %atomicrmw.start2 ; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX6-NEXT: s_waitcnt lgkmcnt(0) -; GFX6-NEXT: v_add_f32_e32 v4, v3, v2 -; GFX6-NEXT: ds_cmpst_rtn_b32 v4, v1, v3, v4 +; GFX6-NEXT: v_mov_b32_e32 v4, v2 +; GFX6-NEXT: v_add_f32_e32 v2, v4, v3 +; GFX6-NEXT: ds_cmpst_rtn_b32 v2, v1, v4, v2 ; GFX6-NEXT: s_waitcnt lgkmcnt(0) -; GFX6-NEXT: v_cmp_eq_u32_e64 s[0:1], v4, v3 +; GFX6-NEXT: v_cmp_eq_u32_e64 s[0:1], v2, v4 ; GFX6-NEXT: s_or_b64 s[8:9], s[0:1], s[8:9] -; GFX6-NEXT: v_mov_b32_e32 v3, v4 ; GFX6-NEXT: s_andn2_b64 exec, exec, s[8:9] ; GFX6-NEXT: s_cbranch_execnz .LBB29_6 ; GFX6-NEXT: .LBB29_7: ; %Flow19 @@ -10084,12 +10110,12 @@ define void @local_atomic_fadd_noret_f32__amdgpu_ignore_denormal_mode(ptr addrsp ; GFX7-NEXT: .LBB31_1: ; %atomicrmw.start ; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX7-NEXT: s_waitcnt lgkmcnt(0) -; GFX7-NEXT: v_add_f32_e32 v2, 4.0, v1 -; GFX7-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 +; GFX7-NEXT: v_mov_b32_e32 v2, v1 +; GFX7-NEXT: v_add_f32_e32 v1, 4.0, v2 +; GFX7-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 ; GFX7-NEXT: s_waitcnt lgkmcnt(0) -; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1 +; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2 ; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5] -; GFX7-NEXT: v_mov_b32_e32 v1, v2 ; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX7-NEXT: s_cbranch_execnz .LBB31_1 ; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end @@ -10105,12 +10131,12 @@ define void @local_atomic_fadd_noret_f32__amdgpu_ignore_denormal_mode(ptr addrsp ; GFX6-NEXT: .LBB31_1: ; %atomicrmw.start ; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX6-NEXT: s_waitcnt lgkmcnt(0) -; GFX6-NEXT: v_add_f32_e32 v2, 4.0, v1 -; GFX6-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 +; GFX6-NEXT: v_mov_b32_e32 v2, v1 +; GFX6-NEXT: v_add_f32_e32 v1, 4.0, v2 +; GFX6-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 ; GFX6-NEXT: s_waitcnt lgkmcnt(0) -; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1 +; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2 ; GFX6-NEXT: 
s_or_b64 s[4:5], vcc, s[4:5] -; GFX6-NEXT: v_mov_b32_e32 v1, v2 ; GFX6-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX6-NEXT: s_cbranch_execnz .LBB31_1 ; GFX6-NEXT: ; %bb.2: ; %atomicrmw.end diff --git a/llvm/test/CodeGen/AMDGPU/local-atomicrmw-fmax.ll b/llvm/test/CodeGen/AMDGPU/local-atomicrmw-fmax.ll index d6b7d8ffaf1c5..8e094a7269a49 100644 --- a/llvm/test/CodeGen/AMDGPU/local-atomicrmw-fmax.ll +++ b/llvm/test/CodeGen/AMDGPU/local-atomicrmw-fmax.ll @@ -1598,29 +1598,29 @@ define void @local_atomic_fmax_noret_f16(ptr addrspace(3) %ptr) nounwind { ; GFX12-TRUE16-NEXT: v_and_b32_e32 v1, -4, v0 ; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v0, 3, v0 ; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0 -; GFX12-TRUE16-NEXT: ds_load_b32 v2, v1 -; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff +; GFX12-TRUE16-NEXT: ds_load_b32 v3, v1 +; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v2, v0, 0xffff ; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, 24, v0 ; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) -; GFX12-TRUE16-NEXT: v_not_b32_e32 v3, v3 +; GFX12-TRUE16-NEXT: v_not_b32_e32 v2, v2 ; GFX12-TRUE16-NEXT: .LBB10_1: ; %atomicrmw.start ; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0 -; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2) -; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v4, v0, v2 -; GFX12-TRUE16-NEXT: v_mov_b16_e32 v4.h, 0 -; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v4.l, v4.l, v4.l +; GFX12-TRUE16-NEXT: v_mov_b32_e32 v4, v3 +; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2) +; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v0, v4 +; GFX12-TRUE16-NEXT: v_mov_b16_e32 v3.h, 0 +; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v3.l, v3.l, v3.l ; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v4.l, 4.0, v4.l -; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v4, v0, v4 +; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v3.l, 4.0, v3.l +; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v0, v3 ; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX12-TRUE16-NEXT: v_and_or_b32 v4, v2, v3, v4 +; GFX12-TRUE16-NEXT: v_and_or_b32 v3, v4, v2, v3 ; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0 -; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v1, v4, v2 +; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v1, v3, v4 ; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0 ; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SE -; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2 -; GFX12-TRUE16-NEXT: v_mov_b32_e32 v2, v4 +; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4 ; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe ; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0 ; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe @@ -1641,29 +1641,29 @@ define void @local_atomic_fmax_noret_f16(ptr addrspace(3) %ptr) nounwind { ; GFX12-FAKE16-NEXT: v_and_b32_e32 v1, -4, v0 ; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v0, 3, v0 ; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0 -; GFX12-FAKE16-NEXT: ds_load_b32 v2, v1 -; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff +; GFX12-FAKE16-NEXT: ds_load_b32 v3, v1 +; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v2, v0, 0xffff ; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, 24, v0 ; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) -; GFX12-FAKE16-NEXT: v_not_b32_e32 v3, v3 +; GFX12-FAKE16-NEXT: v_not_b32_e32 v2, v2 ; GFX12-FAKE16-NEXT: .LBB10_1: ; %atomicrmw.start ; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0 -; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | 
instid1(VALU_DEP_1) -; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v4, v0, v2 -; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v4, v4, v4 +; GFX12-FAKE16-NEXT: v_mov_b32_e32 v4, v3 +; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v0, v4 +; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v3, v3, v3 ; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v4, 4.0, v4 -; GFX12-FAKE16-NEXT: v_and_b32_e32 v4, 0xffff, v4 +; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v3, 4.0, v3 +; GFX12-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3 ; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, v0, v4 -; GFX12-FAKE16-NEXT: v_and_or_b32 v4, v2, v3, v4 +; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v0, v3 +; GFX12-FAKE16-NEXT: v_and_or_b32 v3, v4, v2, v3 ; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0 -; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v1, v4, v2 +; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v3, v1, v3, v4 ; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0 ; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SE -; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2 -; GFX12-FAKE16-NEXT: v_mov_b32_e32 v2, v4 +; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4 ; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe ; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0 ; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe @@ -1688,16 +1688,16 @@ define void @local_atomic_fmax_noret_f16(ptr addrspace(3) %ptr) nounwind { ; GFX942-NEXT: .LBB10_1: ; %atomicrmw.start ; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX942-NEXT: s_waitcnt lgkmcnt(0) -; GFX942-NEXT: v_lshrrev_b32_e32 v4, v0, v3 -; GFX942-NEXT: v_max_f16_e32 v4, v4, v4 -; GFX942-NEXT: v_max_f16_e32 v4, 4.0, v4 -; GFX942-NEXT: v_lshlrev_b32_e32 v4, v0, v4 -; GFX942-NEXT: v_and_or_b32 v4, v3, v2, v4 -; GFX942-NEXT: ds_cmpst_rtn_b32 v4, v1, v3, v4 +; GFX942-NEXT: v_mov_b32_e32 v4, v3 +; GFX942-NEXT: v_lshrrev_b32_e32 v3, v0, v4 +; GFX942-NEXT: v_max_f16_e32 v3, v3, v3 +; GFX942-NEXT: v_max_f16_e32 v3, 4.0, v3 +; GFX942-NEXT: v_lshlrev_b32_e32 v3, v0, v3 +; GFX942-NEXT: v_and_or_b32 v3, v4, v2, v3 +; GFX942-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3 ; GFX942-NEXT: s_waitcnt lgkmcnt(0) -; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3 +; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4 ; GFX942-NEXT: s_or_b64 s[0:1], vcc, s[0:1] -; GFX942-NEXT: v_mov_b32_e32 v3, v4 ; GFX942-NEXT: s_andn2_b64 exec, exec, s[0:1] ; GFX942-NEXT: s_cbranch_execnz .LBB10_1 ; GFX942-NEXT: ; %bb.2: ; %atomicrmw.end @@ -1710,29 +1710,29 @@ define void @local_atomic_fmax_noret_f16(ptr addrspace(3) %ptr) nounwind { ; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, -4, v0 ; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v0, 3, v0 ; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0 -; GFX11-TRUE16-NEXT: ds_load_b32 v2, v1 -; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff +; GFX11-TRUE16-NEXT: ds_load_b32 v3, v1 +; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v2, v0, 0xffff ; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 24, v0 ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) -; GFX11-TRUE16-NEXT: v_not_b32_e32 v3, v3 +; GFX11-TRUE16-NEXT: v_not_b32_e32 v2, v2 ; GFX11-TRUE16-NEXT: .LBB10_1: ; %atomicrmw.start ; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0) -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2) -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v4, v0, v2 -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, 0 -; 
GFX11-TRUE16-NEXT: v_max_f16_e32 v4.l, v4.l, v4.l +; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v3 +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2) +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v0, v4 +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, 0 +; GFX11-TRUE16-NEXT: v_max_f16_e32 v3.l, v3.l, v3.l ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-TRUE16-NEXT: v_max_f16_e32 v4.l, 4.0, v4.l -; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v4, v0, v4 +; GFX11-TRUE16-NEXT: v_max_f16_e32 v3.l, 4.0, v3.l +; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v0, v3 ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX11-TRUE16-NEXT: v_and_or_b32 v4, v2, v3, v4 +; GFX11-TRUE16-NEXT: v_and_or_b32 v3, v4, v2, v3 ; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0 -; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v1, v4, v2 +; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v1, v3, v4 ; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0) ; GFX11-TRUE16-NEXT: buffer_gl0_inv -; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2 -; GFX11-TRUE16-NEXT: v_mov_b32_e32 v2, v4 +; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4 ; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0 ; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0 @@ -1747,29 +1747,29 @@ define void @local_atomic_fmax_noret_f16(ptr addrspace(3) %ptr) nounwind { ; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, -4, v0 ; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v0, 3, v0 ; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0 -; GFX11-FAKE16-NEXT: ds_load_b32 v2, v1 -; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff +; GFX11-FAKE16-NEXT: ds_load_b32 v3, v1 +; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v2, v0, 0xffff ; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 24, v0 ; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) -; GFX11-FAKE16-NEXT: v_not_b32_e32 v3, v3 +; GFX11-FAKE16-NEXT: v_not_b32_e32 v2, v2 ; GFX11-FAKE16-NEXT: .LBB10_1: ; %atomicrmw.start ; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0) -; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, v0, v2 -; GFX11-FAKE16-NEXT: v_max_f16_e32 v4, v4, v4 +; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v3 +; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v0, v4 +; GFX11-FAKE16-NEXT: v_max_f16_e32 v3, v3, v3 ; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-FAKE16-NEXT: v_max_f16_e32 v4, 4.0, v4 -; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 0xffff, v4 +; GFX11-FAKE16-NEXT: v_max_f16_e32 v3, 4.0, v3 +; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3 ; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, v0, v4 -; GFX11-FAKE16-NEXT: v_and_or_b32 v4, v2, v3, v4 +; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v0, v3 +; GFX11-FAKE16-NEXT: v_and_or_b32 v3, v4, v2, v3 ; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0 -; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v1, v4, v2 +; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v3, v1, v3, v4 ; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0) ; GFX11-FAKE16-NEXT: buffer_gl0_inv -; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2 -; GFX11-FAKE16-NEXT: v_mov_b32_e32 v2, v4 +; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4 ; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0 ; 
GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0 @@ -1784,24 +1784,24 @@ define void @local_atomic_fmax_noret_f16(ptr addrspace(3) %ptr) nounwind { ; GFX10-NEXT: v_and_b32_e32 v1, -4, v0 ; GFX10-NEXT: v_lshlrev_b32_e32 v0, 3, v0 ; GFX10-NEXT: s_mov_b32 s4, 0 -; GFX10-NEXT: ds_read_b32 v2, v1 -; GFX10-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff +; GFX10-NEXT: ds_read_b32 v3, v1 +; GFX10-NEXT: v_lshlrev_b32_e64 v2, v0, 0xffff ; GFX10-NEXT: v_and_b32_e32 v0, 24, v0 -; GFX10-NEXT: v_not_b32_e32 v3, v3 +; GFX10-NEXT: v_not_b32_e32 v2, v2 ; GFX10-NEXT: .LBB10_1: ; %atomicrmw.start ; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX10-NEXT: s_waitcnt lgkmcnt(0) -; GFX10-NEXT: v_lshrrev_b32_e32 v4, v0, v2 -; GFX10-NEXT: v_max_f16_e32 v4, v4, v4 -; GFX10-NEXT: v_max_f16_e32 v4, 4.0, v4 -; GFX10-NEXT: v_lshlrev_b32_sdwa v4, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 -; GFX10-NEXT: v_and_or_b32 v4, v2, v3, v4 +; GFX10-NEXT: v_mov_b32_e32 v4, v3 +; GFX10-NEXT: v_lshrrev_b32_e32 v3, v0, v4 +; GFX10-NEXT: v_max_f16_e32 v3, v3, v3 +; GFX10-NEXT: v_max_f16_e32 v3, 4.0, v3 +; GFX10-NEXT: v_lshlrev_b32_sdwa v3, v0, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 +; GFX10-NEXT: v_and_or_b32 v3, v4, v2, v3 ; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 -; GFX10-NEXT: ds_cmpst_rtn_b32 v4, v1, v2, v4 +; GFX10-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3 ; GFX10-NEXT: s_waitcnt lgkmcnt(0) ; GFX10-NEXT: buffer_gl0_inv -; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2 -; GFX10-NEXT: v_mov_b32_e32 v2, v4 +; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4 ; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4 ; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4 ; GFX10-NEXT: s_cbranch_execnz .LBB10_1 @@ -1823,16 +1823,16 @@ define void @local_atomic_fmax_noret_f16(ptr addrspace(3) %ptr) nounwind { ; GFX90A-NEXT: .LBB10_1: ; %atomicrmw.start ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX90A-NEXT: s_waitcnt lgkmcnt(0) -; GFX90A-NEXT: v_lshrrev_b32_e32 v4, v0, v3 -; GFX90A-NEXT: v_max_f16_e32 v4, v4, v4 -; GFX90A-NEXT: v_max_f16_e32 v4, 4.0, v4 -; GFX90A-NEXT: v_lshlrev_b32_e32 v4, v0, v4 -; GFX90A-NEXT: v_and_or_b32 v4, v3, v2, v4 -; GFX90A-NEXT: ds_cmpst_rtn_b32 v4, v1, v3, v4 +; GFX90A-NEXT: v_mov_b32_e32 v4, v3 +; GFX90A-NEXT: v_lshrrev_b32_e32 v3, v0, v4 +; GFX90A-NEXT: v_max_f16_e32 v3, v3, v3 +; GFX90A-NEXT: v_max_f16_e32 v3, 4.0, v3 +; GFX90A-NEXT: v_lshlrev_b32_e32 v3, v0, v3 +; GFX90A-NEXT: v_and_or_b32 v3, v4, v2, v3 +; GFX90A-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3 ; GFX90A-NEXT: s_waitcnt lgkmcnt(0) -; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3 +; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4 ; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5] -; GFX90A-NEXT: v_mov_b32_e32 v3, v4 ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX90A-NEXT: s_cbranch_execnz .LBB10_1 ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end @@ -1853,16 +1853,16 @@ define void @local_atomic_fmax_noret_f16(ptr addrspace(3) %ptr) nounwind { ; GFX908-NEXT: .LBB10_1: ; %atomicrmw.start ; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX908-NEXT: s_waitcnt lgkmcnt(0) -; GFX908-NEXT: v_lshrrev_b32_e32 v4, v0, v3 -; GFX908-NEXT: v_max_f16_e32 v4, v4, v4 -; GFX908-NEXT: v_max_f16_e32 v4, 4.0, v4 -; GFX908-NEXT: v_lshlrev_b32_e32 v4, v0, v4 -; GFX908-NEXT: v_and_or_b32 v4, v3, v2, v4 -; GFX908-NEXT: ds_cmpst_rtn_b32 v4, v1, v3, v4 +; GFX908-NEXT: v_mov_b32_e32 v4, v3 +; GFX908-NEXT: v_lshrrev_b32_e32 v3, v0, v4 +; GFX908-NEXT: v_max_f16_e32 v3, v3, v3 +; GFX908-NEXT: v_max_f16_e32 
v3, 4.0, v3 +; GFX908-NEXT: v_lshlrev_b32_e32 v3, v0, v3 +; GFX908-NEXT: v_and_or_b32 v3, v4, v2, v3 +; GFX908-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3 ; GFX908-NEXT: s_waitcnt lgkmcnt(0) -; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3 +; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4 ; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5] -; GFX908-NEXT: v_mov_b32_e32 v3, v4 ; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX908-NEXT: s_cbranch_execnz .LBB10_1 ; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end @@ -1884,17 +1884,17 @@ define void @local_atomic_fmax_noret_f16(ptr addrspace(3) %ptr) nounwind { ; GFX8-NEXT: .LBB10_1: ; %atomicrmw.start ; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX8-NEXT: s_waitcnt lgkmcnt(0) -; GFX8-NEXT: v_lshrrev_b32_e32 v4, v0, v3 -; GFX8-NEXT: v_max_f16_e32 v4, v4, v4 -; GFX8-NEXT: v_max_f16_e32 v4, 4.0, v4 -; GFX8-NEXT: v_and_b32_e32 v5, v3, v2 -; GFX8-NEXT: v_lshlrev_b32_e32 v4, v0, v4 -; GFX8-NEXT: v_or_b32_e32 v4, v5, v4 -; GFX8-NEXT: ds_cmpst_rtn_b32 v4, v1, v3, v4 +; GFX8-NEXT: v_mov_b32_e32 v4, v3 +; GFX8-NEXT: v_lshrrev_b32_e32 v3, v0, v4 +; GFX8-NEXT: v_max_f16_e32 v3, v3, v3 +; GFX8-NEXT: v_max_f16_e32 v3, 4.0, v3 +; GFX8-NEXT: v_and_b32_e32 v5, v4, v2 +; GFX8-NEXT: v_lshlrev_b32_e32 v3, v0, v3 +; GFX8-NEXT: v_or_b32_e32 v3, v5, v3 +; GFX8-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3 ; GFX8-NEXT: s_waitcnt lgkmcnt(0) -; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3 +; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4 ; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5] -; GFX8-NEXT: v_mov_b32_e32 v3, v4 ; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX8-NEXT: s_cbranch_execnz .LBB10_1 ; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end @@ -1915,18 +1915,18 @@ define void @local_atomic_fmax_noret_f16(ptr addrspace(3) %ptr) nounwind { ; GFX7-NEXT: .LBB10_1: ; %atomicrmw.start ; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX7-NEXT: s_waitcnt lgkmcnt(0) -; GFX7-NEXT: v_lshrrev_b32_e32 v4, v0, v3 -; GFX7-NEXT: v_cvt_f32_f16_e32 v4, v4 -; GFX7-NEXT: v_and_b32_e32 v5, v3, v2 -; GFX7-NEXT: v_max_f32_e32 v4, 4.0, v4 -; GFX7-NEXT: v_cvt_f16_f32_e32 v4, v4 -; GFX7-NEXT: v_lshlrev_b32_e32 v4, v0, v4 -; GFX7-NEXT: v_or_b32_e32 v4, v5, v4 -; GFX7-NEXT: ds_cmpst_rtn_b32 v4, v1, v3, v4 +; GFX7-NEXT: v_mov_b32_e32 v4, v3 +; GFX7-NEXT: v_lshrrev_b32_e32 v3, v0, v4 +; GFX7-NEXT: v_cvt_f32_f16_e32 v3, v3 +; GFX7-NEXT: v_and_b32_e32 v5, v4, v2 +; GFX7-NEXT: v_max_f32_e32 v3, 4.0, v3 +; GFX7-NEXT: v_cvt_f16_f32_e32 v3, v3 +; GFX7-NEXT: v_lshlrev_b32_e32 v3, v0, v3 +; GFX7-NEXT: v_or_b32_e32 v3, v5, v3 +; GFX7-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3 ; GFX7-NEXT: s_waitcnt lgkmcnt(0) -; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3 +; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4 ; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5] -; GFX7-NEXT: v_mov_b32_e32 v3, v4 ; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX7-NEXT: s_cbranch_execnz .LBB10_1 ; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end @@ -1947,18 +1947,18 @@ define void @local_atomic_fmax_noret_f16(ptr addrspace(3) %ptr) nounwind { ; GFX6-NEXT: .LBB10_1: ; %atomicrmw.start ; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX6-NEXT: s_waitcnt lgkmcnt(0) -; GFX6-NEXT: v_lshrrev_b32_e32 v4, v0, v3 -; GFX6-NEXT: v_cvt_f32_f16_e32 v4, v4 -; GFX6-NEXT: v_and_b32_e32 v5, v3, v2 -; GFX6-NEXT: v_max_f32_e32 v4, 4.0, v4 -; GFX6-NEXT: v_cvt_f16_f32_e32 v4, v4 -; GFX6-NEXT: v_lshlrev_b32_e32 v4, v0, v4 -; GFX6-NEXT: v_or_b32_e32 v4, v5, v4 -; GFX6-NEXT: ds_cmpst_rtn_b32 v4, v1, v3, v4 +; GFX6-NEXT: v_mov_b32_e32 v4, v3 +; GFX6-NEXT: v_lshrrev_b32_e32 v3, v0, v4 +; GFX6-NEXT: v_cvt_f32_f16_e32 v3, v3 +; GFX6-NEXT: 
v_and_b32_e32 v5, v4, v2 +; GFX6-NEXT: v_max_f32_e32 v3, 4.0, v3 +; GFX6-NEXT: v_cvt_f16_f32_e32 v3, v3 +; GFX6-NEXT: v_lshlrev_b32_e32 v3, v0, v3 +; GFX6-NEXT: v_or_b32_e32 v3, v5, v3 +; GFX6-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3 ; GFX6-NEXT: s_waitcnt lgkmcnt(0) -; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3 +; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4 ; GFX6-NEXT: s_or_b64 s[4:5], vcc, s[4:5] -; GFX6-NEXT: v_mov_b32_e32 v3, v4 ; GFX6-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX6-NEXT: s_cbranch_execnz .LBB10_1 ; GFX6-NEXT: ; %bb.2: ; %atomicrmw.end @@ -1989,20 +1989,21 @@ define void @local_atomic_fmax_noret_f16__offset(ptr addrspace(3) %ptr) nounwind ; GFX12-TRUE16-NEXT: .LBB11_1: ; %atomicrmw.start ; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0 -; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v4, v1, v3 -; GFX12-TRUE16-NEXT: v_mov_b16_e32 v4.h, 0 -; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v4.l, v4.l, v4.l -; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v4.l, 4.0, v4.l +; GFX12-TRUE16-NEXT: v_mov_b32_e32 v4, v3 +; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2) +; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v1, v4 +; GFX12-TRUE16-NEXT: v_mov_b16_e32 v3.h, 0 +; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v3.l, v3.l, v3.l ; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v4, v1, v4 -; GFX12-TRUE16-NEXT: v_and_or_b32 v4, v3, v2, v4 +; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v3.l, 4.0, v3.l +; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v1, v3 +; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX12-TRUE16-NEXT: v_and_or_b32 v3, v4, v2, v3 ; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0 -; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3 +; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4 ; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0 ; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SE -; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3 -; GFX12-TRUE16-NEXT: v_mov_b32_e32 v3, v4 +; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4 ; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe ; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0 ; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe @@ -2033,21 +2034,21 @@ define void @local_atomic_fmax_noret_f16__offset(ptr addrspace(3) %ptr) nounwind ; GFX12-FAKE16-NEXT: .LBB11_1: ; %atomicrmw.start ; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0 -; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v4, v1, v3 +; GFX12-FAKE16-NEXT: v_mov_b32_e32 v4, v3 +; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v1, v4 +; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v3, v3, v3 ; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v4, v4, v4 -; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v4, 4.0, v4 +; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v3, 4.0, v3 +; GFX12-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3 ; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX12-FAKE16-NEXT: v_and_b32_e32 v4, 0xffff, v4 -; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, v1, v4 -; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX12-FAKE16-NEXT: v_and_or_b32 v4, v3, v2, v4 +; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v1, v3 +; GFX12-FAKE16-NEXT: v_and_or_b32 v3, v4, v2, v3 ; 
GFX12-FAKE16-NEXT: s_wait_storecnt 0x0 -; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3 +; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4 ; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0 ; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SE -; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3 -; GFX12-FAKE16-NEXT: v_mov_b32_e32 v3, v4 +; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4 ; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe ; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0 ; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe @@ -2073,16 +2074,16 @@ define void @local_atomic_fmax_noret_f16__offset(ptr addrspace(3) %ptr) nounwind ; GFX942-NEXT: .LBB11_1: ; %atomicrmw.start ; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX942-NEXT: s_waitcnt lgkmcnt(0) -; GFX942-NEXT: v_lshrrev_b32_e32 v4, v1, v3 -; GFX942-NEXT: v_max_f16_e32 v4, v4, v4 -; GFX942-NEXT: v_max_f16_e32 v4, 4.0, v4 -; GFX942-NEXT: v_lshlrev_b32_e32 v4, v1, v4 -; GFX942-NEXT: v_and_or_b32 v4, v3, v2, v4 -; GFX942-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4 +; GFX942-NEXT: v_mov_b32_e32 v4, v3 +; GFX942-NEXT: v_lshrrev_b32_e32 v3, v1, v4 +; GFX942-NEXT: v_max_f16_e32 v3, v3, v3 +; GFX942-NEXT: v_max_f16_e32 v3, 4.0, v3 +; GFX942-NEXT: v_lshlrev_b32_e32 v3, v1, v3 +; GFX942-NEXT: v_and_or_b32 v3, v4, v2, v3 +; GFX942-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3 ; GFX942-NEXT: s_waitcnt lgkmcnt(0) -; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3 +; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4 ; GFX942-NEXT: s_or_b64 s[0:1], vcc, s[0:1] -; GFX942-NEXT: v_mov_b32_e32 v3, v4 ; GFX942-NEXT: s_andn2_b64 exec, exec, s[0:1] ; GFX942-NEXT: s_cbranch_execnz .LBB11_1 ; GFX942-NEXT: ; %bb.2: ; %atomicrmw.end @@ -2105,20 +2106,21 @@ define void @local_atomic_fmax_noret_f16__offset(ptr addrspace(3) %ptr) nounwind ; GFX11-TRUE16-NEXT: .LBB11_1: ; %atomicrmw.start ; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0) -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v4, v1, v3 -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, 0 -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-TRUE16-NEXT: v_max_f16_e32 v4.l, v4.l, v4.l -; GFX11-TRUE16-NEXT: v_max_f16_e32 v4.l, 4.0, v4.l +; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v3 +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2) +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v1, v4 +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, 0 +; GFX11-TRUE16-NEXT: v_max_f16_e32 v3.l, v3.l, v3.l ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v4, v1, v4 -; GFX11-TRUE16-NEXT: v_and_or_b32 v4, v3, v2, v4 +; GFX11-TRUE16-NEXT: v_max_f16_e32 v3.l, 4.0, v3.l +; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v1, v3 +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-TRUE16-NEXT: v_and_or_b32 v3, v4, v2, v3 ; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0 -; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3 +; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4 ; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0) ; GFX11-TRUE16-NEXT: buffer_gl0_inv -; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3 -; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v4 +; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4 ; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0 ; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0 @@ -2143,21 +2145,21 @@ define void @local_atomic_fmax_noret_f16__offset(ptr addrspace(3) %ptr) nounwind ; 
GFX11-FAKE16-NEXT: .LBB11_1: ; %atomicrmw.start ; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0) -; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, v1, v3 +; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v3 +; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v1, v4 +; GFX11-FAKE16-NEXT: v_max_f16_e32 v3, v3, v3 ; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-FAKE16-NEXT: v_max_f16_e32 v4, v4, v4 -; GFX11-FAKE16-NEXT: v_max_f16_e32 v4, 4.0, v4 +; GFX11-FAKE16-NEXT: v_max_f16_e32 v3, 4.0, v3 +; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3 ; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 0xffff, v4 -; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, v1, v4 -; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX11-FAKE16-NEXT: v_and_or_b32 v4, v3, v2, v4 +; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v1, v3 +; GFX11-FAKE16-NEXT: v_and_or_b32 v3, v4, v2, v3 ; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0 -; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3 +; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4 ; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0) ; GFX11-FAKE16-NEXT: buffer_gl0_inv -; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3 -; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v4 +; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4 ; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0 ; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0 @@ -2180,17 +2182,17 @@ define void @local_atomic_fmax_noret_f16__offset(ptr addrspace(3) %ptr) nounwind ; GFX10-NEXT: .LBB11_1: ; %atomicrmw.start ; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX10-NEXT: s_waitcnt lgkmcnt(0) -; GFX10-NEXT: v_lshrrev_b32_e32 v4, v1, v3 -; GFX10-NEXT: v_max_f16_e32 v4, v4, v4 -; GFX10-NEXT: v_max_f16_e32 v4, 4.0, v4 -; GFX10-NEXT: v_lshlrev_b32_sdwa v4, v1, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 -; GFX10-NEXT: v_and_or_b32 v4, v3, v2, v4 +; GFX10-NEXT: v_mov_b32_e32 v4, v3 +; GFX10-NEXT: v_lshrrev_b32_e32 v3, v1, v4 +; GFX10-NEXT: v_max_f16_e32 v3, v3, v3 +; GFX10-NEXT: v_max_f16_e32 v3, 4.0, v3 +; GFX10-NEXT: v_lshlrev_b32_sdwa v3, v1, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 +; GFX10-NEXT: v_and_or_b32 v3, v4, v2, v3 ; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 -; GFX10-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4 +; GFX10-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3 ; GFX10-NEXT: s_waitcnt lgkmcnt(0) ; GFX10-NEXT: buffer_gl0_inv -; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3 -; GFX10-NEXT: v_mov_b32_e32 v3, v4 +; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4 ; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4 ; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4 ; GFX10-NEXT: s_cbranch_execnz .LBB11_1 @@ -2213,16 +2215,16 @@ define void @local_atomic_fmax_noret_f16__offset(ptr addrspace(3) %ptr) nounwind ; GFX90A-NEXT: .LBB11_1: ; %atomicrmw.start ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX90A-NEXT: s_waitcnt lgkmcnt(0) -; GFX90A-NEXT: v_lshrrev_b32_e32 v4, v1, v3 -; GFX90A-NEXT: v_max_f16_e32 v4, v4, v4 -; GFX90A-NEXT: v_max_f16_e32 v4, 4.0, v4 -; GFX90A-NEXT: v_lshlrev_b32_e32 v4, v1, v4 -; GFX90A-NEXT: v_and_or_b32 v4, v3, v2, v4 -; GFX90A-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4 +; GFX90A-NEXT: v_mov_b32_e32 v4, v3 +; GFX90A-NEXT: v_lshrrev_b32_e32 v3, v1, v4 +; GFX90A-NEXT: 
v_max_f16_e32 v3, v3, v3 +; GFX90A-NEXT: v_max_f16_e32 v3, 4.0, v3 +; GFX90A-NEXT: v_lshlrev_b32_e32 v3, v1, v3 +; GFX90A-NEXT: v_and_or_b32 v3, v4, v2, v3 +; GFX90A-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3 ; GFX90A-NEXT: s_waitcnt lgkmcnt(0) -; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3 +; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4 ; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5] -; GFX90A-NEXT: v_mov_b32_e32 v3, v4 ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX90A-NEXT: s_cbranch_execnz .LBB11_1 ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end @@ -2244,16 +2246,16 @@ define void @local_atomic_fmax_noret_f16__offset(ptr addrspace(3) %ptr) nounwind ; GFX908-NEXT: .LBB11_1: ; %atomicrmw.start ; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX908-NEXT: s_waitcnt lgkmcnt(0) -; GFX908-NEXT: v_lshrrev_b32_e32 v4, v1, v3 -; GFX908-NEXT: v_max_f16_e32 v4, v4, v4 -; GFX908-NEXT: v_max_f16_e32 v4, 4.0, v4 -; GFX908-NEXT: v_lshlrev_b32_e32 v4, v1, v4 -; GFX908-NEXT: v_and_or_b32 v4, v3, v2, v4 -; GFX908-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4 +; GFX908-NEXT: v_mov_b32_e32 v4, v3 +; GFX908-NEXT: v_lshrrev_b32_e32 v3, v1, v4 +; GFX908-NEXT: v_max_f16_e32 v3, v3, v3 +; GFX908-NEXT: v_max_f16_e32 v3, 4.0, v3 +; GFX908-NEXT: v_lshlrev_b32_e32 v3, v1, v3 +; GFX908-NEXT: v_and_or_b32 v3, v4, v2, v3 +; GFX908-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3 ; GFX908-NEXT: s_waitcnt lgkmcnt(0) -; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3 +; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4 ; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5] -; GFX908-NEXT: v_mov_b32_e32 v3, v4 ; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX908-NEXT: s_cbranch_execnz .LBB11_1 ; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end @@ -2276,17 +2278,17 @@ define void @local_atomic_fmax_noret_f16__offset(ptr addrspace(3) %ptr) nounwind ; GFX8-NEXT: .LBB11_1: ; %atomicrmw.start ; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX8-NEXT: s_waitcnt lgkmcnt(0) -; GFX8-NEXT: v_lshrrev_b32_e32 v4, v1, v3 -; GFX8-NEXT: v_max_f16_e32 v4, v4, v4 -; GFX8-NEXT: v_max_f16_e32 v4, 4.0, v4 -; GFX8-NEXT: v_and_b32_e32 v5, v3, v2 -; GFX8-NEXT: v_lshlrev_b32_e32 v4, v1, v4 -; GFX8-NEXT: v_or_b32_e32 v4, v5, v4 -; GFX8-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4 +; GFX8-NEXT: v_mov_b32_e32 v4, v3 +; GFX8-NEXT: v_lshrrev_b32_e32 v3, v1, v4 +; GFX8-NEXT: v_max_f16_e32 v3, v3, v3 +; GFX8-NEXT: v_max_f16_e32 v3, 4.0, v3 +; GFX8-NEXT: v_and_b32_e32 v5, v4, v2 +; GFX8-NEXT: v_lshlrev_b32_e32 v3, v1, v3 +; GFX8-NEXT: v_or_b32_e32 v3, v5, v3 +; GFX8-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3 ; GFX8-NEXT: s_waitcnt lgkmcnt(0) -; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3 +; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4 ; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5] -; GFX8-NEXT: v_mov_b32_e32 v3, v4 ; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX8-NEXT: s_cbranch_execnz .LBB11_1 ; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end @@ -2308,18 +2310,18 @@ define void @local_atomic_fmax_noret_f16__offset(ptr addrspace(3) %ptr) nounwind ; GFX7-NEXT: .LBB11_1: ; %atomicrmw.start ; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX7-NEXT: s_waitcnt lgkmcnt(0) -; GFX7-NEXT: v_lshrrev_b32_e32 v4, v1, v3 -; GFX7-NEXT: v_cvt_f32_f16_e32 v4, v4 -; GFX7-NEXT: v_and_b32_e32 v5, v3, v2 -; GFX7-NEXT: v_max_f32_e32 v4, 4.0, v4 -; GFX7-NEXT: v_cvt_f16_f32_e32 v4, v4 -; GFX7-NEXT: v_lshlrev_b32_e32 v4, v1, v4 -; GFX7-NEXT: v_or_b32_e32 v4, v5, v4 -; GFX7-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4 +; GFX7-NEXT: v_mov_b32_e32 v4, v3 +; GFX7-NEXT: v_lshrrev_b32_e32 v3, v1, v4 +; GFX7-NEXT: v_cvt_f32_f16_e32 v3, v3 +; GFX7-NEXT: v_and_b32_e32 v5, v4, v2 +; 
GFX7-NEXT: v_max_f32_e32 v3, 4.0, v3 +; GFX7-NEXT: v_cvt_f16_f32_e32 v3, v3 +; GFX7-NEXT: v_lshlrev_b32_e32 v3, v1, v3 +; GFX7-NEXT: v_or_b32_e32 v3, v5, v3 +; GFX7-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3 ; GFX7-NEXT: s_waitcnt lgkmcnt(0) -; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3 +; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4 ; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5] -; GFX7-NEXT: v_mov_b32_e32 v3, v4 ; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX7-NEXT: s_cbranch_execnz .LBB11_1 ; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end @@ -2341,18 +2343,18 @@ define void @local_atomic_fmax_noret_f16__offset(ptr addrspace(3) %ptr) nounwind ; GFX6-NEXT: .LBB11_1: ; %atomicrmw.start ; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX6-NEXT: s_waitcnt lgkmcnt(0) -; GFX6-NEXT: v_lshrrev_b32_e32 v4, v1, v3 -; GFX6-NEXT: v_cvt_f32_f16_e32 v4, v4 -; GFX6-NEXT: v_and_b32_e32 v5, v3, v2 -; GFX6-NEXT: v_max_f32_e32 v4, 4.0, v4 -; GFX6-NEXT: v_cvt_f16_f32_e32 v4, v4 -; GFX6-NEXT: v_lshlrev_b32_e32 v4, v1, v4 -; GFX6-NEXT: v_or_b32_e32 v4, v5, v4 -; GFX6-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4 +; GFX6-NEXT: v_mov_b32_e32 v4, v3 +; GFX6-NEXT: v_lshrrev_b32_e32 v3, v1, v4 +; GFX6-NEXT: v_cvt_f32_f16_e32 v3, v3 +; GFX6-NEXT: v_and_b32_e32 v5, v4, v2 +; GFX6-NEXT: v_max_f32_e32 v3, 4.0, v3 +; GFX6-NEXT: v_cvt_f16_f32_e32 v3, v3 +; GFX6-NEXT: v_lshlrev_b32_e32 v3, v1, v3 +; GFX6-NEXT: v_or_b32_e32 v3, v5, v3 +; GFX6-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3 ; GFX6-NEXT: s_waitcnt lgkmcnt(0) -; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3 +; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4 ; GFX6-NEXT: s_or_b64 s[4:5], vcc, s[4:5] -; GFX6-NEXT: v_mov_b32_e32 v3, v4 ; GFX6-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX6-NEXT: s_cbranch_execnz .LBB11_1 ; GFX6-NEXT: ; %bb.2: ; %atomicrmw.end @@ -2685,17 +2687,18 @@ define void @local_atomic_fmax_noret_f16__offset__align4(ptr addrspace(3) %ptr) ; GFX12-TRUE16-NEXT: .LBB13_1: ; %atomicrmw.start ; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0 -; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v2.l, v1.l, v1.l -; GFX12-TRUE16-NEXT: v_mov_b16_e32 v2.h, 0 +; GFX12-TRUE16-NEXT: v_mov_b32_e32 v2, v1 +; GFX12-TRUE16-NEXT: v_mov_b16_e32 v1.h, 0 ; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v2.l, 4.0, v2.l -; GFX12-TRUE16-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v2 +; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v1.l, v2.l, v2.l +; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v1.l, 4.0, v1.l +; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX12-TRUE16-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v1 ; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0 -; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v1 offset:65534 +; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:65534 ; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0 ; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SE -; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1 -; GFX12-TRUE16-NEXT: v_mov_b32_e32 v1, v2 +; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2 ; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe ; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0 ; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe @@ -2718,18 +2721,18 @@ define void @local_atomic_fmax_noret_f16__offset__align4(ptr addrspace(3) %ptr) ; GFX12-FAKE16-NEXT: .LBB13_1: ; %atomicrmw.start ; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0 -; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v2, v1, v1 +; GFX12-FAKE16-NEXT: v_mov_b32_e32 v2, v1 ; GFX12-FAKE16-NEXT: s_delay_alu 
instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v2, 4.0, v2 -; GFX12-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2 -; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX12-FAKE16-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v2 +; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v1, v2, v2 +; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v1, 4.0, v1 +; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX12-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; GFX12-FAKE16-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v1 ; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0 -; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v1 offset:65534 +; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:65534 ; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0 ; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SE -; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1 -; GFX12-FAKE16-NEXT: v_mov_b32_e32 v1, v2 +; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2 ; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe ; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0 ; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe @@ -2749,14 +2752,14 @@ define void @local_atomic_fmax_noret_f16__offset__align4(ptr addrspace(3) %ptr) ; GFX942-NEXT: .LBB13_1: ; %atomicrmw.start ; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX942-NEXT: s_waitcnt lgkmcnt(0) -; GFX942-NEXT: v_max_f16_e32 v2, v1, v1 -; GFX942-NEXT: v_max_f16_e32 v2, 4.0, v2 -; GFX942-NEXT: v_and_or_b32 v2, v1, s2, v2 -; GFX942-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 offset:65534 +; GFX942-NEXT: v_mov_b32_e32 v2, v1 +; GFX942-NEXT: v_max_f16_e32 v1, v2, v2 +; GFX942-NEXT: v_max_f16_e32 v1, 4.0, v1 +; GFX942-NEXT: v_and_or_b32 v1, v2, s2, v1 +; GFX942-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 offset:65534 ; GFX942-NEXT: s_waitcnt lgkmcnt(0) -; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1 +; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2 ; GFX942-NEXT: s_or_b64 s[0:1], vcc, s[0:1] -; GFX942-NEXT: v_mov_b32_e32 v1, v2 ; GFX942-NEXT: s_andn2_b64 exec, exec, s[0:1] ; GFX942-NEXT: s_cbranch_execnz .LBB13_1 ; GFX942-NEXT: ; %bb.2: ; %atomicrmw.end @@ -2771,17 +2774,18 @@ define void @local_atomic_fmax_noret_f16__offset__align4(ptr addrspace(3) %ptr) ; GFX11-TRUE16-NEXT: .LBB13_1: ; %atomicrmw.start ; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0) -; GFX11-TRUE16-NEXT: v_max_f16_e32 v2.l, v1.l, v1.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, 0 +; GFX11-TRUE16-NEXT: v_mov_b32_e32 v2, v1 +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, 0 ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-TRUE16-NEXT: v_max_f16_e32 v2.l, 4.0, v2.l -; GFX11-TRUE16-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v2 +; GFX11-TRUE16-NEXT: v_max_f16_e32 v1.l, v2.l, v2.l +; GFX11-TRUE16-NEXT: v_max_f16_e32 v1.l, 4.0, v1.l +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-TRUE16-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v1 ; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0 -; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v1 offset:65534 +; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:65534 ; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0) ; GFX11-TRUE16-NEXT: buffer_gl0_inv -; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1 -; GFX11-TRUE16-NEXT: v_mov_b32_e32 v1, v2 +; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2 ; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0 ; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0 @@ -2798,18 +2802,18 @@ 
define void @local_atomic_fmax_noret_f16__offset__align4(ptr addrspace(3) %ptr) ; GFX11-FAKE16-NEXT: .LBB13_1: ; %atomicrmw.start ; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0) -; GFX11-FAKE16-NEXT: v_max_f16_e32 v2, v1, v1 +; GFX11-FAKE16-NEXT: v_mov_b32_e32 v2, v1 ; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-FAKE16-NEXT: v_max_f16_e32 v2, 4.0, v2 -; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2 -; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX11-FAKE16-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v2 -; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0 -; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v1 offset:65534 -; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0) -; GFX11-FAKE16-NEXT: buffer_gl0_inv -; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1 -; GFX11-FAKE16-NEXT: v_mov_b32_e32 v1, v2 +; GFX11-FAKE16-NEXT: v_max_f16_e32 v1, v2, v2 +; GFX11-FAKE16-NEXT: v_max_f16_e32 v1, 4.0, v1 +; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; GFX11-FAKE16-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v1 +; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0 +; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:65534 +; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-FAKE16-NEXT: buffer_gl0_inv +; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2 ; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0 ; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0 @@ -2826,16 +2830,16 @@ define void @local_atomic_fmax_noret_f16__offset__align4(ptr addrspace(3) %ptr) ; GFX10-NEXT: .LBB13_1: ; %atomicrmw.start ; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX10-NEXT: s_waitcnt lgkmcnt(0) -; GFX10-NEXT: v_max_f16_e32 v2, v1, v1 -; GFX10-NEXT: v_max_f16_e32 v2, 4.0, v2 -; GFX10-NEXT: v_and_b32_e32 v2, 0xffff, v2 -; GFX10-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v2 +; GFX10-NEXT: v_mov_b32_e32 v2, v1 +; GFX10-NEXT: v_max_f16_e32 v1, v2, v2 +; GFX10-NEXT: v_max_f16_e32 v1, 4.0, v1 +; GFX10-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; GFX10-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v1 ; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 -; GFX10-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 offset:65534 +; GFX10-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 offset:65534 ; GFX10-NEXT: s_waitcnt lgkmcnt(0) ; GFX10-NEXT: buffer_gl0_inv -; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1 -; GFX10-NEXT: v_mov_b32_e32 v1, v2 +; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2 ; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4 ; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4 ; GFX10-NEXT: s_cbranch_execnz .LBB13_1 @@ -2852,14 +2856,14 @@ define void @local_atomic_fmax_noret_f16__offset__align4(ptr addrspace(3) %ptr) ; GFX90A-NEXT: .LBB13_1: ; %atomicrmw.start ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX90A-NEXT: s_waitcnt lgkmcnt(0) -; GFX90A-NEXT: v_max_f16_e32 v2, v1, v1 -; GFX90A-NEXT: v_max_f16_e32 v2, 4.0, v2 -; GFX90A-NEXT: v_and_or_b32 v2, v1, s6, v2 -; GFX90A-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 offset:65534 +; GFX90A-NEXT: v_mov_b32_e32 v2, v1 +; GFX90A-NEXT: v_max_f16_e32 v1, v2, v2 +; GFX90A-NEXT: v_max_f16_e32 v1, 4.0, v1 +; GFX90A-NEXT: v_and_or_b32 v1, v2, s6, v1 +; GFX90A-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 offset:65534 ; GFX90A-NEXT: s_waitcnt lgkmcnt(0) -; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1 +; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2 ; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5] -; 
GFX90A-NEXT: v_mov_b32_e32 v1, v2 ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX90A-NEXT: s_cbranch_execnz .LBB13_1 ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end @@ -2875,14 +2879,14 @@ define void @local_atomic_fmax_noret_f16__offset__align4(ptr addrspace(3) %ptr) ; GFX908-NEXT: .LBB13_1: ; %atomicrmw.start ; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX908-NEXT: s_waitcnt lgkmcnt(0) -; GFX908-NEXT: v_max_f16_e32 v2, v1, v1 -; GFX908-NEXT: v_max_f16_e32 v2, 4.0, v2 -; GFX908-NEXT: v_and_or_b32 v2, v1, s6, v2 -; GFX908-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 offset:65534 +; GFX908-NEXT: v_mov_b32_e32 v2, v1 +; GFX908-NEXT: v_max_f16_e32 v1, v2, v2 +; GFX908-NEXT: v_max_f16_e32 v1, 4.0, v1 +; GFX908-NEXT: v_and_or_b32 v1, v2, s6, v1 +; GFX908-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 offset:65534 ; GFX908-NEXT: s_waitcnt lgkmcnt(0) -; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1 +; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2 ; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5] -; GFX908-NEXT: v_mov_b32_e32 v1, v2 ; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX908-NEXT: s_cbranch_execnz .LBB13_1 ; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end @@ -2898,15 +2902,15 @@ define void @local_atomic_fmax_noret_f16__offset__align4(ptr addrspace(3) %ptr) ; GFX8-NEXT: .LBB13_1: ; %atomicrmw.start ; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX8-NEXT: s_waitcnt lgkmcnt(0) -; GFX8-NEXT: v_max_f16_e32 v2, v1, v1 -; GFX8-NEXT: v_and_b32_e32 v3, 0xffff0000, v1 -; GFX8-NEXT: v_max_f16_e32 v2, 4.0, v2 -; GFX8-NEXT: v_or_b32_e32 v2, v3, v2 -; GFX8-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 offset:65534 +; GFX8-NEXT: v_mov_b32_e32 v2, v1 +; GFX8-NEXT: v_max_f16_e32 v1, v2, v2 +; GFX8-NEXT: v_and_b32_e32 v3, 0xffff0000, v2 +; GFX8-NEXT: v_max_f16_e32 v1, 4.0, v1 +; GFX8-NEXT: v_or_b32_e32 v1, v3, v1 +; GFX8-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 offset:65534 ; GFX8-NEXT: s_waitcnt lgkmcnt(0) -; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1 +; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2 ; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5] -; GFX8-NEXT: v_mov_b32_e32 v1, v2 ; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX8-NEXT: s_cbranch_execnz .LBB13_1 ; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end @@ -2922,16 +2926,16 @@ define void @local_atomic_fmax_noret_f16__offset__align4(ptr addrspace(3) %ptr) ; GFX7-NEXT: .LBB13_1: ; %atomicrmw.start ; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX7-NEXT: s_waitcnt lgkmcnt(0) -; GFX7-NEXT: v_cvt_f32_f16_e32 v2, v1 -; GFX7-NEXT: v_and_b32_e32 v3, 0xffff0000, v1 -; GFX7-NEXT: v_max_f32_e32 v2, 4.0, v2 -; GFX7-NEXT: v_cvt_f16_f32_e32 v2, v2 -; GFX7-NEXT: v_or_b32_e32 v2, v3, v2 -; GFX7-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 offset:65534 +; GFX7-NEXT: v_mov_b32_e32 v2, v1 +; GFX7-NEXT: v_cvt_f32_f16_e32 v1, v2 +; GFX7-NEXT: v_and_b32_e32 v3, 0xffff0000, v2 +; GFX7-NEXT: v_max_f32_e32 v1, 4.0, v1 +; GFX7-NEXT: v_cvt_f16_f32_e32 v1, v1 +; GFX7-NEXT: v_or_b32_e32 v1, v3, v1 +; GFX7-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 offset:65534 ; GFX7-NEXT: s_waitcnt lgkmcnt(0) -; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1 +; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2 ; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5] -; GFX7-NEXT: v_mov_b32_e32 v1, v2 ; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX7-NEXT: s_cbranch_execnz .LBB13_1 ; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end @@ -2948,16 +2952,16 @@ define void @local_atomic_fmax_noret_f16__offset__align4(ptr addrspace(3) %ptr) ; GFX6-NEXT: .LBB13_1: ; %atomicrmw.start ; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX6-NEXT: s_waitcnt lgkmcnt(0) -; GFX6-NEXT: v_cvt_f32_f16_e32 v2, v1 -; 
GFX6-NEXT: v_and_b32_e32 v3, 0xffff0000, v1 -; GFX6-NEXT: v_max_f32_e32 v2, 4.0, v2 -; GFX6-NEXT: v_cvt_f16_f32_e32 v2, v2 -; GFX6-NEXT: v_or_b32_e32 v2, v3, v2 -; GFX6-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 +; GFX6-NEXT: v_mov_b32_e32 v2, v1 +; GFX6-NEXT: v_cvt_f32_f16_e32 v1, v2 +; GFX6-NEXT: v_and_b32_e32 v3, 0xffff0000, v2 +; GFX6-NEXT: v_max_f32_e32 v1, 4.0, v1 +; GFX6-NEXT: v_cvt_f16_f32_e32 v1, v1 +; GFX6-NEXT: v_or_b32_e32 v1, v3, v1 +; GFX6-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 ; GFX6-NEXT: s_waitcnt lgkmcnt(0) -; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1 +; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2 ; GFX6-NEXT: s_or_b64 s[4:5], vcc, s[4:5] -; GFX6-NEXT: v_mov_b32_e32 v1, v2 ; GFX6-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX6-NEXT: s_cbranch_execnz .LBB13_1 ; GFX6-NEXT: ; %bb.2: ; %atomicrmw.end @@ -3911,38 +3915,38 @@ define void @local_atomic_fmax_noret_bf16(ptr addrspace(3) %ptr) nounwind { ; GFX12-TRUE16-NEXT: v_and_b32_e32 v1, -4, v0 ; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v0, 3, v0 ; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0 -; GFX12-TRUE16-NEXT: ds_load_b32 v2, v1 -; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff +; GFX12-TRUE16-NEXT: ds_load_b32 v3, v1 +; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v2, v0, 0xffff ; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, 24, v0 ; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) -; GFX12-TRUE16-NEXT: v_not_b32_e32 v3, v3 +; GFX12-TRUE16-NEXT: v_not_b32_e32 v2, v2 ; GFX12-TRUE16-NEXT: .LBB16_1: ; %atomicrmw.start ; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0 -; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v4, v0, v2 -; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 16, v4 +; GFX12-TRUE16-NEXT: v_mov_b32_e32 v4, v3 +; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v0, v4 +; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3 ; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX12-TRUE16-NEXT: v_max_num_f32_e32 v4, 4.0, v4 -; GFX12-TRUE16-NEXT: v_bfe_u32 v5, v4, 16, 1 -; GFX12-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v4 -; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4 +; GFX12-TRUE16-NEXT: v_max_num_f32_e32 v3, 4.0, v3 +; GFX12-TRUE16-NEXT: v_bfe_u32 v5, v3, 16, 1 +; GFX12-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v3 +; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3 ; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1) -; GFX12-TRUE16-NEXT: v_add3_u32 v5, v5, v4, 0x7fff +; GFX12-TRUE16-NEXT: v_add3_u32 v5, v5, v3, 0x7fff ; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd -; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo +; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo ; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.h, 0 ; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.l, v4.h -; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v4, v0, v5 +; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.l, v3.h +; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v0, v5 ; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX12-TRUE16-NEXT: v_and_or_b32 v4, v2, v3, v4 +; GFX12-TRUE16-NEXT: v_and_or_b32 v3, v4, v2, v3 ; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0 -; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v1, v4, v2 +; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v1, v3, v4 ; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0 ; GFX12-TRUE16-NEXT: 
global_inv scope:SCOPE_SE -; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2 -; GFX12-TRUE16-NEXT: v_mov_b32_e32 v2, v4 +; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4 ; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe ; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0 ; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe @@ -3963,37 +3967,37 @@ define void @local_atomic_fmax_noret_bf16(ptr addrspace(3) %ptr) nounwind { ; GFX12-FAKE16-NEXT: v_and_b32_e32 v1, -4, v0 ; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v0, 3, v0 ; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0 -; GFX12-FAKE16-NEXT: ds_load_b32 v2, v1 -; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff +; GFX12-FAKE16-NEXT: ds_load_b32 v3, v1 +; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v2, v0, 0xffff ; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, 24, v0 ; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) -; GFX12-FAKE16-NEXT: v_not_b32_e32 v3, v3 +; GFX12-FAKE16-NEXT: v_not_b32_e32 v2, v2 ; GFX12-FAKE16-NEXT: .LBB16_1: ; %atomicrmw.start ; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0 -; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v4, v0, v2 -; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v4 +; GFX12-FAKE16-NEXT: v_mov_b32_e32 v4, v3 ; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX12-FAKE16-NEXT: v_max_num_f32_e32 v4, 4.0, v4 -; GFX12-FAKE16-NEXT: v_bfe_u32 v5, v4, 16, 1 -; GFX12-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v4 -; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4 +; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v0, v4 +; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX12-FAKE16-NEXT: v_max_num_f32_e32 v3, 4.0, v3 +; GFX12-FAKE16-NEXT: v_bfe_u32 v5, v3, 16, 1 +; GFX12-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v3 +; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3 ; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1) -; GFX12-FAKE16-NEXT: v_add3_u32 v5, v5, v4, 0x7fff +; GFX12-FAKE16-NEXT: v_add3_u32 v5, v5, v3, 0x7fff ; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd -; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo +; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo ; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v4 -; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, v0, v4 +; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3 +; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v0, v3 ; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX12-FAKE16-NEXT: v_and_or_b32 v4, v2, v3, v4 +; GFX12-FAKE16-NEXT: v_and_or_b32 v3, v4, v2, v3 ; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0 -; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v1, v4, v2 +; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v3, v1, v3, v4 ; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0 ; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SE -; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2 -; GFX12-FAKE16-NEXT: v_mov_b32_e32 v2, v4 +; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4 ; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe ; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0 ; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe @@ -4019,22 +4023,22 @@ define void @local_atomic_fmax_noret_bf16(ptr addrspace(3) %ptr) nounwind { ; GFX942-NEXT: .LBB16_1: ; %atomicrmw.start ; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX942-NEXT: s_waitcnt lgkmcnt(0) -; GFX942-NEXT: 
v_lshrrev_b32_sdwa v4, v0, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD +; GFX942-NEXT: v_mov_b32_e32 v4, v3 +; GFX942-NEXT: v_lshrrev_b32_sdwa v3, v0, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; GFX942-NEXT: s_nop 0 -; GFX942-NEXT: v_max_f32_e32 v4, 4.0, v4 -; GFX942-NEXT: v_bfe_u32 v5, v4, 16, 1 -; GFX942-NEXT: v_or_b32_e32 v6, 0x400000, v4 -; GFX942-NEXT: v_add3_u32 v5, v5, v4, s2 -; GFX942-NEXT: v_cmp_u_f32_e32 vcc, v4, v4 +; GFX942-NEXT: v_max_f32_e32 v3, 4.0, v3 +; GFX942-NEXT: v_bfe_u32 v5, v3, 16, 1 +; GFX942-NEXT: v_or_b32_e32 v6, 0x400000, v3 +; GFX942-NEXT: v_add3_u32 v5, v5, v3, s2 +; GFX942-NEXT: v_cmp_u_f32_e32 vcc, v3, v3 ; GFX942-NEXT: s_nop 1 -; GFX942-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc -; GFX942-NEXT: v_lshlrev_b32_sdwa v4, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1 -; GFX942-NEXT: v_and_or_b32 v4, v3, v2, v4 -; GFX942-NEXT: ds_cmpst_rtn_b32 v4, v1, v3, v4 +; GFX942-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc +; GFX942-NEXT: v_lshlrev_b32_sdwa v3, v0, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1 +; GFX942-NEXT: v_and_or_b32 v3, v4, v2, v3 +; GFX942-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3 ; GFX942-NEXT: s_waitcnt lgkmcnt(0) -; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3 +; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4 ; GFX942-NEXT: s_or_b64 s[0:1], vcc, s[0:1] -; GFX942-NEXT: v_mov_b32_e32 v3, v4 ; GFX942-NEXT: s_andn2_b64 exec, exec, s[0:1] ; GFX942-NEXT: s_cbranch_execnz .LBB16_1 ; GFX942-NEXT: ; %bb.2: ; %atomicrmw.end @@ -4047,38 +4051,38 @@ define void @local_atomic_fmax_noret_bf16(ptr addrspace(3) %ptr) nounwind { ; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, -4, v0 ; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v0, 3, v0 ; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0 -; GFX11-TRUE16-NEXT: ds_load_b32 v2, v1 -; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff +; GFX11-TRUE16-NEXT: ds_load_b32 v3, v1 +; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v2, v0, 0xffff ; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 24, v0 ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) -; GFX11-TRUE16-NEXT: v_not_b32_e32 v3, v3 +; GFX11-TRUE16-NEXT: v_not_b32_e32 v2, v2 ; GFX11-TRUE16-NEXT: .p2align 6 ; GFX11-TRUE16-NEXT: .LBB16_1: ; %atomicrmw.start ; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0) -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v4, v0, v2 -; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 16, v4 +; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v3 ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-TRUE16-NEXT: v_max_f32_e32 v4, 4.0, v4 -; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v4, 16, 1 -; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v4 -; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v0, v4 +; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX11-TRUE16-NEXT: v_max_f32_e32 v3, 4.0, v3 +; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v3, 16, 1 +; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v3 +; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3 ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-TRUE16-NEXT: v_add3_u32 v5, v5, v4, 0x7fff -; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo +; GFX11-TRUE16-NEXT: v_add3_u32 v5, v5, v3, 0x7fff +; 
GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, 0 ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v4.h -; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v4, v0, v5 +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v3.h +; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v0, v5 ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX11-TRUE16-NEXT: v_and_or_b32 v4, v2, v3, v4 +; GFX11-TRUE16-NEXT: v_and_or_b32 v3, v4, v2, v3 ; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0 -; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v1, v4, v2 +; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v1, v3, v4 ; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0) ; GFX11-TRUE16-NEXT: buffer_gl0_inv -; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2 -; GFX11-TRUE16-NEXT: v_mov_b32_e32 v2, v4 +; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4 ; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0 ; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0 @@ -4093,37 +4097,37 @@ define void @local_atomic_fmax_noret_bf16(ptr addrspace(3) %ptr) nounwind { ; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, -4, v0 ; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v0, 3, v0 ; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0 -; GFX11-FAKE16-NEXT: ds_load_b32 v2, v1 -; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff +; GFX11-FAKE16-NEXT: ds_load_b32 v3, v1 +; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v2, v0, 0xffff ; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 24, v0 ; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) -; GFX11-FAKE16-NEXT: v_not_b32_e32 v3, v3 +; GFX11-FAKE16-NEXT: v_not_b32_e32 v2, v2 ; GFX11-FAKE16-NEXT: .p2align 6 ; GFX11-FAKE16-NEXT: .LBB16_1: ; %atomicrmw.start ; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0) -; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, v0, v2 -; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v4 +; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v3 ; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-FAKE16-NEXT: v_max_f32_e32 v4, 4.0, v4 -; GFX11-FAKE16-NEXT: v_bfe_u32 v5, v4, 16, 1 -; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v4 -; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4 +; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v0, v4 +; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX11-FAKE16-NEXT: v_max_f32_e32 v3, 4.0, v3 +; GFX11-FAKE16-NEXT: v_bfe_u32 v5, v3, 16, 1 +; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v3 +; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3 ; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-FAKE16-NEXT: v_add3_u32 v5, v5, v4, 0x7fff -; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo +; GFX11-FAKE16-NEXT: v_add3_u32 v5, v5, v3, 0x7fff +; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo ; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v4 -; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, v0, v4 +; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3 +; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v0, v3 ; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX11-FAKE16-NEXT: v_and_or_b32 v4, v2, v3, v4 +; GFX11-FAKE16-NEXT: v_and_or_b32 v3, 
v4, v2, v3 ; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0 -; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v1, v4, v2 +; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v3, v1, v3, v4 ; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0) ; GFX11-FAKE16-NEXT: buffer_gl0_inv -; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2 -; GFX11-FAKE16-NEXT: v_mov_b32_e32 v2, v4 +; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4 ; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0 ; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0 @@ -4138,28 +4142,28 @@ define void @local_atomic_fmax_noret_bf16(ptr addrspace(3) %ptr) nounwind { ; GFX10-NEXT: v_and_b32_e32 v1, -4, v0 ; GFX10-NEXT: v_lshlrev_b32_e32 v0, 3, v0 ; GFX10-NEXT: s_mov_b32 s4, 0 -; GFX10-NEXT: ds_read_b32 v2, v1 -; GFX10-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff +; GFX10-NEXT: ds_read_b32 v3, v1 +; GFX10-NEXT: v_lshlrev_b32_e64 v2, v0, 0xffff ; GFX10-NEXT: v_and_b32_e32 v0, 24, v0 -; GFX10-NEXT: v_not_b32_e32 v3, v3 +; GFX10-NEXT: v_not_b32_e32 v2, v2 ; GFX10-NEXT: .LBB16_1: ; %atomicrmw.start ; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX10-NEXT: s_waitcnt lgkmcnt(0) -; GFX10-NEXT: v_lshrrev_b32_sdwa v4, v0, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD -; GFX10-NEXT: v_max_f32_e32 v4, 4.0, v4 -; GFX10-NEXT: v_bfe_u32 v5, v4, 16, 1 -; GFX10-NEXT: v_or_b32_e32 v6, 0x400000, v4 -; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4 -; GFX10-NEXT: v_add3_u32 v5, v5, v4, 0x7fff -; GFX10-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo -; GFX10-NEXT: v_lshlrev_b32_sdwa v4, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1 -; GFX10-NEXT: v_and_or_b32 v4, v2, v3, v4 +; GFX10-NEXT: v_mov_b32_e32 v4, v3 +; GFX10-NEXT: v_lshrrev_b32_sdwa v3, v0, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD +; GFX10-NEXT: v_max_f32_e32 v3, 4.0, v3 +; GFX10-NEXT: v_bfe_u32 v5, v3, 16, 1 +; GFX10-NEXT: v_or_b32_e32 v6, 0x400000, v3 +; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3 +; GFX10-NEXT: v_add3_u32 v5, v5, v3, 0x7fff +; GFX10-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo +; GFX10-NEXT: v_lshlrev_b32_sdwa v3, v0, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1 +; GFX10-NEXT: v_and_or_b32 v3, v4, v2, v3 ; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 -; GFX10-NEXT: ds_cmpst_rtn_b32 v4, v1, v2, v4 +; GFX10-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3 ; GFX10-NEXT: s_waitcnt lgkmcnt(0) ; GFX10-NEXT: buffer_gl0_inv -; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2 -; GFX10-NEXT: v_mov_b32_e32 v2, v4 +; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4 ; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4 ; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4 ; GFX10-NEXT: s_cbranch_execnz .LBB16_1 @@ -4182,20 +4186,20 @@ define void @local_atomic_fmax_noret_bf16(ptr addrspace(3) %ptr) nounwind { ; GFX90A-NEXT: .LBB16_1: ; %atomicrmw.start ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX90A-NEXT: s_waitcnt lgkmcnt(0) -; GFX90A-NEXT: v_lshrrev_b32_sdwa v4, v0, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD -; GFX90A-NEXT: v_max_f32_e32 v4, 4.0, v4 -; GFX90A-NEXT: v_bfe_u32 v5, v4, 16, 1 -; GFX90A-NEXT: v_or_b32_e32 v6, 0x400000, v4 -; GFX90A-NEXT: v_add3_u32 v5, v5, v4, s6 -; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v4, v4 -; GFX90A-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc -; GFX90A-NEXT: v_lshlrev_b32_sdwa v4, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1 -; GFX90A-NEXT: v_and_or_b32 v4, v3, v2, v4 -; GFX90A-NEXT: ds_cmpst_rtn_b32 v4, v1, v3, v4 +; 
GFX90A-NEXT: v_mov_b32_e32 v4, v3 +; GFX90A-NEXT: v_lshrrev_b32_sdwa v3, v0, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD +; GFX90A-NEXT: v_max_f32_e32 v3, 4.0, v3 +; GFX90A-NEXT: v_bfe_u32 v5, v3, 16, 1 +; GFX90A-NEXT: v_or_b32_e32 v6, 0x400000, v3 +; GFX90A-NEXT: v_add3_u32 v5, v5, v3, s6 +; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v3, v3 +; GFX90A-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc +; GFX90A-NEXT: v_lshlrev_b32_sdwa v3, v0, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1 +; GFX90A-NEXT: v_and_or_b32 v3, v4, v2, v3 +; GFX90A-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3 ; GFX90A-NEXT: s_waitcnt lgkmcnt(0) -; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3 +; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4 ; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5] -; GFX90A-NEXT: v_mov_b32_e32 v3, v4 ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX90A-NEXT: s_cbranch_execnz .LBB16_1 ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end @@ -4217,20 +4221,20 @@ define void @local_atomic_fmax_noret_bf16(ptr addrspace(3) %ptr) nounwind { ; GFX908-NEXT: .LBB16_1: ; %atomicrmw.start ; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX908-NEXT: s_waitcnt lgkmcnt(0) -; GFX908-NEXT: v_lshrrev_b32_sdwa v4, v0, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD -; GFX908-NEXT: v_max_f32_e32 v4, 4.0, v4 -; GFX908-NEXT: v_bfe_u32 v5, v4, 16, 1 -; GFX908-NEXT: v_or_b32_e32 v6, 0x400000, v4 -; GFX908-NEXT: v_add3_u32 v5, v5, v4, s6 -; GFX908-NEXT: v_cmp_u_f32_e32 vcc, v4, v4 -; GFX908-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc -; GFX908-NEXT: v_lshlrev_b32_sdwa v4, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1 -; GFX908-NEXT: v_and_or_b32 v4, v3, v2, v4 -; GFX908-NEXT: ds_cmpst_rtn_b32 v4, v1, v3, v4 +; GFX908-NEXT: v_mov_b32_e32 v4, v3 +; GFX908-NEXT: v_lshrrev_b32_sdwa v3, v0, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD +; GFX908-NEXT: v_max_f32_e32 v3, 4.0, v3 +; GFX908-NEXT: v_bfe_u32 v5, v3, 16, 1 +; GFX908-NEXT: v_or_b32_e32 v6, 0x400000, v3 +; GFX908-NEXT: v_add3_u32 v5, v5, v3, s6 +; GFX908-NEXT: v_cmp_u_f32_e32 vcc, v3, v3 +; GFX908-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc +; GFX908-NEXT: v_lshlrev_b32_sdwa v3, v0, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1 +; GFX908-NEXT: v_and_or_b32 v3, v4, v2, v3 +; GFX908-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3 ; GFX908-NEXT: s_waitcnt lgkmcnt(0) -; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3 +; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4 ; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5] -; GFX908-NEXT: v_mov_b32_e32 v3, v4 ; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX908-NEXT: s_cbranch_execnz .LBB16_1 ; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end @@ -4252,22 +4256,22 @@ define void @local_atomic_fmax_noret_bf16(ptr addrspace(3) %ptr) nounwind { ; GFX8-NEXT: .LBB16_1: ; %atomicrmw.start ; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX8-NEXT: s_waitcnt lgkmcnt(0) -; GFX8-NEXT: v_lshrrev_b32_sdwa v4, v0, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD -; GFX8-NEXT: v_max_f32_e32 v4, 4.0, v4 -; GFX8-NEXT: v_bfe_u32 v6, v4, 16, 1 -; GFX8-NEXT: v_add_u32_e32 v6, vcc, v6, v4 +; GFX8-NEXT: v_mov_b32_e32 v4, v3 +; GFX8-NEXT: v_lshrrev_b32_sdwa v3, v0, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD +; GFX8-NEXT: v_max_f32_e32 v3, 4.0, v3 +; GFX8-NEXT: v_bfe_u32 v6, v3, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v6, vcc, v6, v3 ; GFX8-NEXT: v_add_u32_e32 v6, vcc, 0x7fff, v6 -; GFX8-NEXT: v_or_b32_e32 v7, 0x400000, v4 -; GFX8-NEXT: 
v_cmp_u_f32_e32 vcc, v4, v4 -; GFX8-NEXT: v_cndmask_b32_e32 v4, v6, v7, vcc -; GFX8-NEXT: v_and_b32_e32 v5, v3, v2 -; GFX8-NEXT: v_lshlrev_b32_sdwa v4, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1 -; GFX8-NEXT: v_or_b32_e32 v4, v5, v4 -; GFX8-NEXT: ds_cmpst_rtn_b32 v4, v1, v3, v4 +; GFX8-NEXT: v_or_b32_e32 v7, 0x400000, v3 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v3, v3 +; GFX8-NEXT: v_cndmask_b32_e32 v3, v6, v7, vcc +; GFX8-NEXT: v_and_b32_e32 v5, v4, v2 +; GFX8-NEXT: v_lshlrev_b32_sdwa v3, v0, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1 +; GFX8-NEXT: v_or_b32_e32 v3, v5, v3 +; GFX8-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3 ; GFX8-NEXT: s_waitcnt lgkmcnt(0) -; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3 +; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4 ; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5] -; GFX8-NEXT: v_mov_b32_e32 v3, v4 ; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX8-NEXT: s_cbranch_execnz .LBB16_1 ; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end @@ -4288,19 +4292,19 @@ define void @local_atomic_fmax_noret_bf16(ptr addrspace(3) %ptr) nounwind { ; GFX7-NEXT: .LBB16_1: ; %atomicrmw.start ; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX7-NEXT: s_waitcnt lgkmcnt(0) -; GFX7-NEXT: v_lshrrev_b32_e32 v4, v0, v3 -; GFX7-NEXT: v_lshlrev_b32_e32 v4, 16, v4 -; GFX7-NEXT: v_mul_f32_e32 v4, 1.0, v4 -; GFX7-NEXT: v_max_f32_e32 v4, 4.0, v4 -; GFX7-NEXT: v_lshrrev_b32_e32 v4, 16, v4 -; GFX7-NEXT: v_and_b32_e32 v5, v3, v2 -; GFX7-NEXT: v_lshlrev_b32_e32 v4, v0, v4 -; GFX7-NEXT: v_or_b32_e32 v4, v5, v4 -; GFX7-NEXT: ds_cmpst_rtn_b32 v4, v1, v3, v4 +; GFX7-NEXT: v_mov_b32_e32 v4, v3 +; GFX7-NEXT: v_lshrrev_b32_e32 v3, v0, v4 +; GFX7-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; GFX7-NEXT: v_mul_f32_e32 v3, 1.0, v3 +; GFX7-NEXT: v_max_f32_e32 v3, 4.0, v3 +; GFX7-NEXT: v_lshrrev_b32_e32 v3, 16, v3 +; GFX7-NEXT: v_and_b32_e32 v5, v4, v2 +; GFX7-NEXT: v_lshlrev_b32_e32 v3, v0, v3 +; GFX7-NEXT: v_or_b32_e32 v3, v5, v3 +; GFX7-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3 ; GFX7-NEXT: s_waitcnt lgkmcnt(0) -; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3 +; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4 ; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5] -; GFX7-NEXT: v_mov_b32_e32 v3, v4 ; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX7-NEXT: s_cbranch_execnz .LBB16_1 ; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end @@ -4321,19 +4325,19 @@ define void @local_atomic_fmax_noret_bf16(ptr addrspace(3) %ptr) nounwind { ; GFX6-NEXT: .LBB16_1: ; %atomicrmw.start ; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX6-NEXT: s_waitcnt lgkmcnt(0) -; GFX6-NEXT: v_lshrrev_b32_e32 v4, v0, v3 -; GFX6-NEXT: v_lshlrev_b32_e32 v4, 16, v4 -; GFX6-NEXT: v_mul_f32_e32 v4, 1.0, v4 -; GFX6-NEXT: v_max_f32_e32 v4, 4.0, v4 -; GFX6-NEXT: v_lshrrev_b32_e32 v4, 16, v4 -; GFX6-NEXT: v_and_b32_e32 v5, v3, v2 -; GFX6-NEXT: v_lshlrev_b32_e32 v4, v0, v4 -; GFX6-NEXT: v_or_b32_e32 v4, v5, v4 -; GFX6-NEXT: ds_cmpst_rtn_b32 v4, v1, v3, v4 +; GFX6-NEXT: v_mov_b32_e32 v4, v3 +; GFX6-NEXT: v_lshrrev_b32_e32 v3, v0, v4 +; GFX6-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; GFX6-NEXT: v_mul_f32_e32 v3, 1.0, v3 +; GFX6-NEXT: v_max_f32_e32 v3, 4.0, v3 +; GFX6-NEXT: v_lshrrev_b32_e32 v3, 16, v3 +; GFX6-NEXT: v_and_b32_e32 v5, v4, v2 +; GFX6-NEXT: v_lshlrev_b32_e32 v3, v0, v3 +; GFX6-NEXT: v_or_b32_e32 v3, v5, v3 +; GFX6-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3 ; GFX6-NEXT: s_waitcnt lgkmcnt(0) -; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3 +; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4 ; GFX6-NEXT: s_or_b64 s[4:5], vcc, s[4:5] -; GFX6-NEXT: v_mov_b32_e32 v3, v4 ; GFX6-NEXT: 
s_andn2_b64 exec, exec, s[4:5] ; GFX6-NEXT: s_cbranch_execnz .LBB16_1 ; GFX6-NEXT: ; %bb.2: ; %atomicrmw.end @@ -4364,29 +4368,30 @@ define void @local_atomic_fmax_noret_bf16__offset(ptr addrspace(3) %ptr) nounwin ; GFX12-TRUE16-NEXT: .LBB17_1: ; %atomicrmw.start ; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0 -; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v4, v1, v3 +; GFX12-TRUE16-NEXT: v_mov_b32_e32 v4, v3 ; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 16, v4 -; GFX12-TRUE16-NEXT: v_max_num_f32_e32 v4, 4.0, v4 -; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3) -; GFX12-TRUE16-NEXT: v_bfe_u32 v5, v4, 16, 1 -; GFX12-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v4 -; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4 -; GFX12-TRUE16-NEXT: v_add3_u32 v5, v5, v4, 0x7fff +; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v1, v4 +; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX12-TRUE16-NEXT: v_max_num_f32_e32 v3, 4.0, v3 +; GFX12-TRUE16-NEXT: v_bfe_u32 v5, v3, 16, 1 +; GFX12-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v3 +; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3 +; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1) +; GFX12-TRUE16-NEXT: v_add3_u32 v5, v5, v3, 0x7fff ; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd -; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2) -; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo +; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo ; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.h, 0 -; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.l, v4.h -; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v4, v1, v5 -; GFX12-TRUE16-NEXT: v_and_or_b32 v4, v3, v2, v4 +; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.l, v3.h +; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v1, v5 +; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX12-TRUE16-NEXT: v_and_or_b32 v3, v4, v2, v3 ; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0 -; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3 +; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4 ; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0 ; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SE -; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3 -; GFX12-TRUE16-NEXT: v_mov_b32_e32 v3, v4 +; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4 ; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe ; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0 ; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe @@ -4417,28 +4422,29 @@ define void @local_atomic_fmax_noret_bf16__offset(ptr addrspace(3) %ptr) nounwin ; GFX12-FAKE16-NEXT: .LBB17_1: ; %atomicrmw.start ; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0 -; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v4, v1, v3 +; GFX12-FAKE16-NEXT: v_mov_b32_e32 v4, v3 ; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v4 -; GFX12-FAKE16-NEXT: v_max_num_f32_e32 v4, 4.0, v4 -; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3) -; GFX12-FAKE16-NEXT: v_bfe_u32 v5, v4, 16, 1 -; GFX12-FAKE16-NEXT: v_or_b32_e32 v6, 
0x400000, v4 -; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4 -; GFX12-FAKE16-NEXT: v_add3_u32 v5, v5, v4, 0x7fff -; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd +; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v1, v4 +; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3 ; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo -; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v4 +; GFX12-FAKE16-NEXT: v_max_num_f32_e32 v3, 4.0, v3 +; GFX12-FAKE16-NEXT: v_bfe_u32 v5, v3, 16, 1 +; GFX12-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v3 +; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3 +; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1) +; GFX12-FAKE16-NEXT: v_add3_u32 v5, v5, v3, 0x7fff +; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd +; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo ; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, v1, v4 -; GFX12-FAKE16-NEXT: v_and_or_b32 v4, v3, v2, v4 +; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3 +; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v1, v3 +; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX12-FAKE16-NEXT: v_and_or_b32 v3, v4, v2, v3 ; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0 -; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3 +; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4 ; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0 ; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SE -; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3 -; GFX12-FAKE16-NEXT: v_mov_b32_e32 v3, v4 +; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4 ; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe ; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0 ; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe @@ -4465,22 +4471,22 @@ define void @local_atomic_fmax_noret_bf16__offset(ptr addrspace(3) %ptr) nounwin ; GFX942-NEXT: .LBB17_1: ; %atomicrmw.start ; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX942-NEXT: s_waitcnt lgkmcnt(0) -; GFX942-NEXT: v_lshrrev_b32_sdwa v4, v1, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD +; GFX942-NEXT: v_mov_b32_e32 v4, v3 +; GFX942-NEXT: v_lshrrev_b32_sdwa v3, v1, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; GFX942-NEXT: s_nop 0 -; GFX942-NEXT: v_max_f32_e32 v4, 4.0, v4 -; GFX942-NEXT: v_bfe_u32 v5, v4, 16, 1 -; GFX942-NEXT: v_or_b32_e32 v6, 0x400000, v4 -; GFX942-NEXT: v_add3_u32 v5, v5, v4, s2 -; GFX942-NEXT: v_cmp_u_f32_e32 vcc, v4, v4 +; GFX942-NEXT: v_max_f32_e32 v3, 4.0, v3 +; GFX942-NEXT: v_bfe_u32 v5, v3, 16, 1 +; GFX942-NEXT: v_or_b32_e32 v6, 0x400000, v3 +; GFX942-NEXT: v_add3_u32 v5, v5, v3, s2 +; GFX942-NEXT: v_cmp_u_f32_e32 vcc, v3, v3 ; GFX942-NEXT: s_nop 1 -; GFX942-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc -; GFX942-NEXT: v_lshlrev_b32_sdwa v4, v1, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1 -; GFX942-NEXT: v_and_or_b32 v4, v3, v2, v4 -; GFX942-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4 +; GFX942-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc +; GFX942-NEXT: v_lshlrev_b32_sdwa v3, v1, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1 +; GFX942-NEXT: v_and_or_b32 v3, v4, v2, v3 +; GFX942-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3 ; GFX942-NEXT: s_waitcnt lgkmcnt(0) -; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3 +; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4 ; GFX942-NEXT: s_or_b64 s[0:1], vcc, s[0:1] -; GFX942-NEXT: v_mov_b32_e32 v3, v4 ; GFX942-NEXT: s_andn2_b64 exec, exec, 
s[0:1] ; GFX942-NEXT: s_cbranch_execnz .LBB17_1 ; GFX942-NEXT: ; %bb.2: ; %atomicrmw.end @@ -4504,28 +4510,29 @@ define void @local_atomic_fmax_noret_bf16__offset(ptr addrspace(3) %ptr) nounwin ; GFX11-TRUE16-NEXT: .LBB17_1: ; %atomicrmw.start ; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0) -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v4, v1, v3 +; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v3 ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 16, v4 -; GFX11-TRUE16-NEXT: v_max_f32_e32 v4, 4.0, v4 -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3) -; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v4, 16, 1 -; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v4 -; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4 -; GFX11-TRUE16-NEXT: v_add3_u32 v5, v5, v4, 0x7fff -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2) -; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, 0 -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v4.h +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v1, v4 +; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3 ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v4, v1, v5 -; GFX11-TRUE16-NEXT: v_and_or_b32 v4, v3, v2, v4 +; GFX11-TRUE16-NEXT: v_max_f32_e32 v3, 4.0, v3 +; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v3, 16, 1 +; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v3 +; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3 +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX11-TRUE16-NEXT: v_add3_u32 v5, v5, v3, 0x7fff +; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, 0 +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v3.h +; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v1, v5 +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-TRUE16-NEXT: v_and_or_b32 v3, v4, v2, v3 ; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0 -; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3 +; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4 ; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0) ; GFX11-TRUE16-NEXT: buffer_gl0_inv -; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3 -; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v4 +; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4 ; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0 ; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0 @@ -4551,27 +4558,28 @@ define void @local_atomic_fmax_noret_bf16__offset(ptr addrspace(3) %ptr) nounwin ; GFX11-FAKE16-NEXT: .LBB17_1: ; %atomicrmw.start ; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0) -; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, v1, v3 +; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v3 ; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v4 -; GFX11-FAKE16-NEXT: v_max_f32_e32 v4, 4.0, v4 -; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3) -; GFX11-FAKE16-NEXT: v_bfe_u32 v5, v4, 16, 1 -; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v4 -; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4 -; 
GFX11-FAKE16-NEXT: v_add3_u32 v5, v5, v4, 0x7fff +; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v1, v4 +; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3 ; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo -; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v4 +; GFX11-FAKE16-NEXT: v_max_f32_e32 v3, 4.0, v3 +; GFX11-FAKE16-NEXT: v_bfe_u32 v5, v3, 16, 1 +; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v3 +; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3 +; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX11-FAKE16-NEXT: v_add3_u32 v5, v5, v3, 0x7fff +; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo ; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, v1, v4 -; GFX11-FAKE16-NEXT: v_and_or_b32 v4, v3, v2, v4 +; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3 +; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v1, v3 +; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-FAKE16-NEXT: v_and_or_b32 v3, v4, v2, v3 ; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0 -; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3 +; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4 ; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0) ; GFX11-FAKE16-NEXT: buffer_gl0_inv -; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3 -; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v4 +; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4 ; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0 ; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0 @@ -4594,21 +4602,21 @@ define void @local_atomic_fmax_noret_bf16__offset(ptr addrspace(3) %ptr) nounwin ; GFX10-NEXT: .LBB17_1: ; %atomicrmw.start ; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX10-NEXT: s_waitcnt lgkmcnt(0) -; GFX10-NEXT: v_lshrrev_b32_sdwa v4, v1, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD -; GFX10-NEXT: v_max_f32_e32 v4, 4.0, v4 -; GFX10-NEXT: v_bfe_u32 v5, v4, 16, 1 -; GFX10-NEXT: v_or_b32_e32 v6, 0x400000, v4 -; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4 -; GFX10-NEXT: v_add3_u32 v5, v5, v4, 0x7fff -; GFX10-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo -; GFX10-NEXT: v_lshlrev_b32_sdwa v4, v1, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1 -; GFX10-NEXT: v_and_or_b32 v4, v3, v2, v4 +; GFX10-NEXT: v_mov_b32_e32 v4, v3 +; GFX10-NEXT: v_lshrrev_b32_sdwa v3, v1, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD +; GFX10-NEXT: v_max_f32_e32 v3, 4.0, v3 +; GFX10-NEXT: v_bfe_u32 v5, v3, 16, 1 +; GFX10-NEXT: v_or_b32_e32 v6, 0x400000, v3 +; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3 +; GFX10-NEXT: v_add3_u32 v5, v5, v3, 0x7fff +; GFX10-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo +; GFX10-NEXT: v_lshlrev_b32_sdwa v3, v1, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1 +; GFX10-NEXT: v_and_or_b32 v3, v4, v2, v3 ; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 -; GFX10-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4 +; GFX10-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3 ; GFX10-NEXT: s_waitcnt lgkmcnt(0) ; GFX10-NEXT: buffer_gl0_inv -; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3 -; GFX10-NEXT: v_mov_b32_e32 v3, v4 +; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4 ; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4 ; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4 ; GFX10-NEXT: s_cbranch_execnz .LBB17_1 @@ -4632,20 +4640,20 @@ define void 
@local_atomic_fmax_noret_bf16__offset(ptr addrspace(3) %ptr) nounwin ; GFX90A-NEXT: .LBB17_1: ; %atomicrmw.start ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX90A-NEXT: s_waitcnt lgkmcnt(0) -; GFX90A-NEXT: v_lshrrev_b32_sdwa v4, v1, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD -; GFX90A-NEXT: v_max_f32_e32 v4, 4.0, v4 -; GFX90A-NEXT: v_bfe_u32 v5, v4, 16, 1 -; GFX90A-NEXT: v_or_b32_e32 v6, 0x400000, v4 -; GFX90A-NEXT: v_add3_u32 v5, v5, v4, s6 -; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v4, v4 -; GFX90A-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc -; GFX90A-NEXT: v_lshlrev_b32_sdwa v4, v1, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1 -; GFX90A-NEXT: v_and_or_b32 v4, v3, v2, v4 -; GFX90A-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4 +; GFX90A-NEXT: v_mov_b32_e32 v4, v3 +; GFX90A-NEXT: v_lshrrev_b32_sdwa v3, v1, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD +; GFX90A-NEXT: v_max_f32_e32 v3, 4.0, v3 +; GFX90A-NEXT: v_bfe_u32 v5, v3, 16, 1 +; GFX90A-NEXT: v_or_b32_e32 v6, 0x400000, v3 +; GFX90A-NEXT: v_add3_u32 v5, v5, v3, s6 +; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v3, v3 +; GFX90A-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc +; GFX90A-NEXT: v_lshlrev_b32_sdwa v3, v1, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1 +; GFX90A-NEXT: v_and_or_b32 v3, v4, v2, v3 +; GFX90A-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3 ; GFX90A-NEXT: s_waitcnt lgkmcnt(0) -; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3 +; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4 ; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5] -; GFX90A-NEXT: v_mov_b32_e32 v3, v4 ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX90A-NEXT: s_cbranch_execnz .LBB17_1 ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end @@ -4668,20 +4676,20 @@ define void @local_atomic_fmax_noret_bf16__offset(ptr addrspace(3) %ptr) nounwin ; GFX908-NEXT: .LBB17_1: ; %atomicrmw.start ; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX908-NEXT: s_waitcnt lgkmcnt(0) -; GFX908-NEXT: v_lshrrev_b32_sdwa v4, v1, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD -; GFX908-NEXT: v_max_f32_e32 v4, 4.0, v4 -; GFX908-NEXT: v_bfe_u32 v5, v4, 16, 1 -; GFX908-NEXT: v_or_b32_e32 v6, 0x400000, v4 -; GFX908-NEXT: v_add3_u32 v5, v5, v4, s6 -; GFX908-NEXT: v_cmp_u_f32_e32 vcc, v4, v4 -; GFX908-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc -; GFX908-NEXT: v_lshlrev_b32_sdwa v4, v1, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1 -; GFX908-NEXT: v_and_or_b32 v4, v3, v2, v4 -; GFX908-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4 +; GFX908-NEXT: v_mov_b32_e32 v4, v3 +; GFX908-NEXT: v_lshrrev_b32_sdwa v3, v1, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD +; GFX908-NEXT: v_max_f32_e32 v3, 4.0, v3 +; GFX908-NEXT: v_bfe_u32 v5, v3, 16, 1 +; GFX908-NEXT: v_or_b32_e32 v6, 0x400000, v3 +; GFX908-NEXT: v_add3_u32 v5, v5, v3, s6 +; GFX908-NEXT: v_cmp_u_f32_e32 vcc, v3, v3 +; GFX908-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc +; GFX908-NEXT: v_lshlrev_b32_sdwa v3, v1, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1 +; GFX908-NEXT: v_and_or_b32 v3, v4, v2, v3 +; GFX908-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3 ; GFX908-NEXT: s_waitcnt lgkmcnt(0) -; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3 +; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4 ; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5] -; GFX908-NEXT: v_mov_b32_e32 v3, v4 ; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX908-NEXT: s_cbranch_execnz .LBB17_1 ; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end @@ -4704,22 +4712,22 @@ define void 
@local_atomic_fmax_noret_bf16__offset(ptr addrspace(3) %ptr) nounwin ; GFX8-NEXT: .LBB17_1: ; %atomicrmw.start ; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX8-NEXT: s_waitcnt lgkmcnt(0) -; GFX8-NEXT: v_lshrrev_b32_sdwa v4, v1, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD -; GFX8-NEXT: v_max_f32_e32 v4, 4.0, v4 -; GFX8-NEXT: v_bfe_u32 v6, v4, 16, 1 -; GFX8-NEXT: v_add_u32_e32 v6, vcc, v6, v4 +; GFX8-NEXT: v_mov_b32_e32 v4, v3 +; GFX8-NEXT: v_lshrrev_b32_sdwa v3, v1, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD +; GFX8-NEXT: v_max_f32_e32 v3, 4.0, v3 +; GFX8-NEXT: v_bfe_u32 v6, v3, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v6, vcc, v6, v3 ; GFX8-NEXT: v_add_u32_e32 v6, vcc, 0x7fff, v6 -; GFX8-NEXT: v_or_b32_e32 v7, 0x400000, v4 -; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v4, v4 -; GFX8-NEXT: v_cndmask_b32_e32 v4, v6, v7, vcc -; GFX8-NEXT: v_and_b32_e32 v5, v3, v2 -; GFX8-NEXT: v_lshlrev_b32_sdwa v4, v1, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1 -; GFX8-NEXT: v_or_b32_e32 v4, v5, v4 -; GFX8-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4 +; GFX8-NEXT: v_or_b32_e32 v7, 0x400000, v3 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v3, v3 +; GFX8-NEXT: v_cndmask_b32_e32 v3, v6, v7, vcc +; GFX8-NEXT: v_and_b32_e32 v5, v4, v2 +; GFX8-NEXT: v_lshlrev_b32_sdwa v3, v1, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1 +; GFX8-NEXT: v_or_b32_e32 v3, v5, v3 +; GFX8-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3 ; GFX8-NEXT: s_waitcnt lgkmcnt(0) -; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3 +; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4 ; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5] -; GFX8-NEXT: v_mov_b32_e32 v3, v4 ; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX8-NEXT: s_cbranch_execnz .LBB17_1 ; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end @@ -4741,19 +4749,19 @@ define void @local_atomic_fmax_noret_bf16__offset(ptr addrspace(3) %ptr) nounwin ; GFX7-NEXT: .LBB17_1: ; %atomicrmw.start ; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX7-NEXT: s_waitcnt lgkmcnt(0) -; GFX7-NEXT: v_lshrrev_b32_e32 v4, v1, v3 -; GFX7-NEXT: v_lshlrev_b32_e32 v4, 16, v4 -; GFX7-NEXT: v_mul_f32_e32 v4, 1.0, v4 -; GFX7-NEXT: v_max_f32_e32 v4, 4.0, v4 -; GFX7-NEXT: v_lshrrev_b32_e32 v4, 16, v4 -; GFX7-NEXT: v_and_b32_e32 v5, v3, v2 -; GFX7-NEXT: v_lshlrev_b32_e32 v4, v1, v4 -; GFX7-NEXT: v_or_b32_e32 v4, v5, v4 -; GFX7-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4 +; GFX7-NEXT: v_mov_b32_e32 v4, v3 +; GFX7-NEXT: v_lshrrev_b32_e32 v3, v1, v4 +; GFX7-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; GFX7-NEXT: v_mul_f32_e32 v3, 1.0, v3 +; GFX7-NEXT: v_max_f32_e32 v3, 4.0, v3 +; GFX7-NEXT: v_lshrrev_b32_e32 v3, 16, v3 +; GFX7-NEXT: v_and_b32_e32 v5, v4, v2 +; GFX7-NEXT: v_lshlrev_b32_e32 v3, v1, v3 +; GFX7-NEXT: v_or_b32_e32 v3, v5, v3 +; GFX7-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3 ; GFX7-NEXT: s_waitcnt lgkmcnt(0) -; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3 +; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4 ; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5] -; GFX7-NEXT: v_mov_b32_e32 v3, v4 ; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX7-NEXT: s_cbranch_execnz .LBB17_1 ; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end @@ -4775,19 +4783,19 @@ define void @local_atomic_fmax_noret_bf16__offset(ptr addrspace(3) %ptr) nounwin ; GFX6-NEXT: .LBB17_1: ; %atomicrmw.start ; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX6-NEXT: s_waitcnt lgkmcnt(0) -; GFX6-NEXT: v_lshrrev_b32_e32 v4, v1, v3 -; GFX6-NEXT: v_lshlrev_b32_e32 v4, 16, v4 -; GFX6-NEXT: v_mul_f32_e32 v4, 1.0, v4 -; GFX6-NEXT: v_max_f32_e32 v4, 4.0, v4 -; GFX6-NEXT: 
v_lshrrev_b32_e32 v4, 16, v4
-; GFX6-NEXT: v_and_b32_e32 v5, v3, v2
-; GFX6-NEXT: v_lshlrev_b32_e32 v4, v1, v4
-; GFX6-NEXT: v_or_b32_e32 v4, v5, v4
-; GFX6-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX6-NEXT: v_mov_b32_e32 v4, v3
+; GFX6-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX6-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX6-NEXT: v_mul_f32_e32 v3, 1.0, v3
+; GFX6-NEXT: v_max_f32_e32 v3, 4.0, v3
+; GFX6-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX6-NEXT: v_and_b32_e32 v5, v4, v2
+; GFX6-NEXT: v_lshlrev_b32_e32 v3, v1, v3
+; GFX6-NEXT: v_or_b32_e32 v3, v5, v3
+; GFX6-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
 ; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
 ; GFX6-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX6-NEXT: v_mov_b32_e32 v3, v4
 ; GFX6-NEXT: s_andn2_b64 exec, exec, s[4:5]
 ; GFX6-NEXT: s_cbranch_execnz .LBB17_1
 ; GFX6-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -5189,26 +5197,27 @@ define void @local_atomic_fmax_noret_bf16__offset__align4(ptr addrspace(3) %ptr)
 ; GFX12-TRUE16-NEXT: .LBB19_1: ; %atomicrmw.start
 ; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
-; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v1
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v2, v1
 ; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-TRUE16-NEXT: v_max_num_f32_e32 v2, 4.0, v2
-; GFX12-TRUE16-NEXT: v_bfe_u32 v3, v2, 16, 1
-; GFX12-TRUE16-NEXT: v_or_b32_e32 v4, 0x400000, v2
-; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX12-TRUE16-NEXT: v_add3_u32 v3, v3, v2, 0x7fff
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 16, v2
+; GFX12-TRUE16-NEXT: v_max_num_f32_e32 v1, 4.0, v1
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX12-TRUE16-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v4, 0x400000, v1
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX12-TRUE16-NEXT: v_add3_u32 v3, v3, v1, 0x7fff
 ; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
-; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v2, v3, v4, vcc_lo
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc_lo
 ; GFX12-TRUE16-NEXT: v_mov_b16_e32 v3.h, 0
-; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-TRUE16-NEXT: v_mov_b16_e32 v3.l, v2.h
-; GFX12-TRUE16-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v3
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v3.l, v1.h
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v3
 ; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
-; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v1 offset:65534
+; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:65534
 ; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
 ; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SE
-; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
-; GFX12-TRUE16-NEXT: v_mov_b32_e32 v1, v2
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
 ; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
 ; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
 ; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
@@ -5231,25 +5240,26 @@ define void @local_atomic_fmax_noret_bf16__offset__align4(ptr addrspace(3) %ptr)
 ; GFX12-FAKE16-NEXT: .LBB19_1: ; %atomicrmw.start
 ; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
-; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v1
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v2, v1
 ; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_max_num_f32_e32 v2, 4.0, v2
-; GFX12-FAKE16-NEXT: v_bfe_u32 v3, v2, 16, 1
-; GFX12-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v2
-; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_add3_u32 v3, v3, v2, 0x7fff
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v1, 16, v2
+; GFX12-FAKE16-NEXT: v_max_num_f32_e32 v1, 4.0, v1
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX12-FAKE16-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v1
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX12-FAKE16-NEXT: v_add3_u32 v3, v3, v1, 0x7fff
 ; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v2, v3, v4, vcc_lo
 ; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX12-FAKE16-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v2
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc_lo
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v1
 ; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
-; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v1 offset:65534
+; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:65534
 ; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
 ; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SE
-; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
-; GFX12-FAKE16-NEXT: v_mov_b32_e32 v1, v2
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
 ; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
 ; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
 ; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
@@ -5270,21 +5280,21 @@ define void @local_atomic_fmax_noret_bf16__offset__align4(ptr addrspace(3) %ptr)
 ; GFX942-NEXT: .LBB19_1: ; %atomicrmw.start
 ; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_lshlrev_b32_e32 v2, 16, v1
-; GFX942-NEXT: v_max_f32_e32 v2, 4.0, v2
-; GFX942-NEXT: v_bfe_u32 v3, v2, 16, 1
-; GFX942-NEXT: v_or_b32_e32 v4, 0x400000, v2
-; GFX942-NEXT: v_add3_u32 v3, v3, v2, s2
-; GFX942-NEXT: v_cmp_u_f32_e32 vcc, v2, v2
+; GFX942-NEXT: v_mov_b32_e32 v2, v1
+; GFX942-NEXT: v_lshlrev_b32_e32 v1, 16, v2
+; GFX942-NEXT: v_max_f32_e32 v1, 4.0, v1
+; GFX942-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX942-NEXT: v_or_b32_e32 v4, 0x400000, v1
+; GFX942-NEXT: v_add3_u32 v3, v3, v1, s2
+; GFX942-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
 ; GFX942-NEXT: s_nop 1
-; GFX942-NEXT: v_cndmask_b32_e32 v2, v3, v4, vcc
-; GFX942-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX942-NEXT: v_and_or_b32 v2, v1, s3, v2
-; GFX942-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 offset:65534
+; GFX942-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc
+; GFX942-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX942-NEXT: v_and_or_b32 v1, v2, s3, v1
+; GFX942-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 offset:65534
 ; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
 ; GFX942-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
-; GFX942-NEXT: v_mov_b32_e32 v1, v2
 ; GFX942-NEXT: s_andn2_b64 exec, exec, s[0:1]
 ; GFX942-NEXT: s_cbranch_execnz .LBB19_1
 ; GFX942-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -5300,25 +5310,26 @@ define void @local_atomic_fmax_noret_bf16__offset__align4(ptr addrspace(3) %ptr)
 ; GFX11-TRUE16-NEXT: .LBB19_1: ; %atomicrmw.start
 ; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v2, v1
 ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_max_f32_e32 v2, 4.0, v2
-; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v2, 16, 1
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, 0x400000, v2
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_add3_u32 v3, v3, v2, 0x7fff
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v3, v4, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 16, v2
+; GFX11-TRUE16-NEXT: v_max_f32_e32 v1, 4.0, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add3_u32 v3, v3, v1, 0x7fff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc_lo
 ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, 0
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v2.h
-; GFX11-TRUE16-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v3
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v1.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v3
 ; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v1 offset:65534
+; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:65534
 ; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX11-TRUE16-NEXT: buffer_gl0_inv
-; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v1, v2
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
 ; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
 ; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
 ; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -5336,24 +5347,25 @@ define void @local_atomic_fmax_noret_bf16__offset__align4(ptr addrspace(3) %ptr)
 ; GFX11-FAKE16-NEXT: .LBB19_1: ; %atomicrmw.start
 ; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v1
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v2, v1
 ; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_max_f32_e32 v2, 4.0, v2
-; GFX11-FAKE16-NEXT: v_bfe_u32 v3, v2, 16, 1
-; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v2
-; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_add3_u32 v3, v3, v2, 0x7fff
-; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v3, v4, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v1, 16, v2
+; GFX11-FAKE16-NEXT: v_max_f32_e32 v1, 4.0, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-FAKE16-NEXT: v_add3_u32 v3, v3, v1, 0x7fff
 ; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX11-FAKE16-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v2
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v1
 ; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v1 offset:65534
+; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:65534
 ; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX11-FAKE16-NEXT: buffer_gl0_inv
-; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
-; GFX11-FAKE16-NEXT: v_mov_b32_e32 v1, v2
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
 ; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
 ; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
 ; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -5370,21 +5382,21 @@ define void @local_atomic_fmax_noret_bf16__offset__align4(ptr addrspace(3) %ptr)
 ; GFX10-NEXT: .LBB19_1: ; %atomicrmw.start
 ; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX10-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-NEXT: v_lshlrev_b32_e32 v2, 16, v1
-; GFX10-NEXT: v_max_f32_e32 v2, 4.0, v2
-; GFX10-NEXT: v_bfe_u32 v3, v2, 16, 1
-; GFX10-NEXT: v_or_b32_e32 v4, 0x400000, v2
-; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX10-NEXT: v_add3_u32 v3, v3, v2, 0x7fff
-; GFX10-NEXT: v_cndmask_b32_e32 v2, v3, v4, vcc_lo
-; GFX10-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX10-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v2
+; GFX10-NEXT: v_mov_b32_e32 v2, v1
+; GFX10-NEXT: v_lshlrev_b32_e32 v1, 16, v2
+; GFX10-NEXT: v_max_f32_e32 v1, 4.0, v1
+; GFX10-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX10-NEXT: v_or_b32_e32 v4, 0x400000, v1
+; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX10-NEXT: v_add3_u32 v3, v3, v1, 0x7fff
+; GFX10-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc_lo
+; GFX10-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX10-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v1
 ; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 offset:65534
+; GFX10-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 offset:65534
 ; GFX10-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
-; GFX10-NEXT: v_mov_b32_e32 v1, v2
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
 ; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4
 ; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4
 ; GFX10-NEXT: s_cbranch_execnz .LBB19_1
@@ -5402,20 +5414,20 @@ define void @local_atomic_fmax_noret_bf16__offset__align4(ptr addrspace(3) %ptr)
 ; GFX90A-NEXT: .LBB19_1: ; %atomicrmw.start
 ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_lshlrev_b32_e32 v2, 16, v1
-; GFX90A-NEXT: v_max_f32_e32 v2, 4.0, v2
-; GFX90A-NEXT: v_bfe_u32 v3, v2, 16, 1
-; GFX90A-NEXT: v_or_b32_e32 v4, 0x400000, v2
-; GFX90A-NEXT: v_add3_u32 v3, v3, v2, s6
-; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v2, v2
-; GFX90A-NEXT: v_cndmask_b32_e32 v2, v3, v4, vcc
-; GFX90A-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX90A-NEXT: v_and_or_b32 v2, v1, s7, v2
-; GFX90A-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 offset:65534
+; GFX90A-NEXT: v_mov_b32_e32 v2, v1
+; GFX90A-NEXT: v_lshlrev_b32_e32 v1, 16, v2
+; GFX90A-NEXT: v_max_f32_e32 v1, 4.0, v1
+; GFX90A-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX90A-NEXT: v_or_b32_e32 v4, 0x400000, v1
+; GFX90A-NEXT: v_add3_u32 v3, v3, v1, s6
+; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
+; GFX90A-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc
+; GFX90A-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX90A-NEXT: v_and_or_b32 v1, v2, s7, v1
+; GFX90A-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 offset:65534
 ; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
 ; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX90A-NEXT: v_mov_b32_e32 v1, v2
 ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5]
 ; GFX90A-NEXT: s_cbranch_execnz .LBB19_1
 ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -5432,20 +5444,20 @@ define void @local_atomic_fmax_noret_bf16__offset__align4(ptr addrspace(3) %ptr)
 ; GFX908-NEXT: .LBB19_1: ; %atomicrmw.start
 ; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_lshlrev_b32_e32 v2, 16, v1
-; GFX908-NEXT: v_max_f32_e32 v2, 4.0, v2
-; GFX908-NEXT: v_bfe_u32 v3, v2, 16, 1
-; GFX908-NEXT: v_or_b32_e32 v4, 0x400000, v2
-; GFX908-NEXT: v_add3_u32 v3, v3, v2, s6
-; GFX908-NEXT: v_cmp_u_f32_e32 vcc, v2, v2
-; GFX908-NEXT: v_cndmask_b32_e32 v2, v3, v4, vcc
-; GFX908-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX908-NEXT: v_and_or_b32 v2, v1, s7, v2
-; GFX908-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 offset:65534
+; GFX908-NEXT: v_mov_b32_e32 v2, v1
+; GFX908-NEXT: v_lshlrev_b32_e32 v1, 16, v2
+; GFX908-NEXT: v_max_f32_e32 v1, 4.0, v1
+; GFX908-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX908-NEXT: v_or_b32_e32 v4, 0x400000, v1
+; GFX908-NEXT: v_add3_u32 v3, v3, v1, s6
+; GFX908-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
+; GFX908-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc
+; GFX908-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX908-NEXT: v_and_or_b32 v1, v2, s7, v1
+; GFX908-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 offset:65534
 ; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
 ; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX908-NEXT: v_mov_b32_e32 v1, v2
 ; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5]
 ; GFX908-NEXT: s_cbranch_execnz .LBB19_1
 ; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -5461,21 +5473,21 @@ define void @local_atomic_fmax_noret_bf16__offset__align4(ptr addrspace(3) %ptr)
 ; GFX8-NEXT: .LBB19_1: ; %atomicrmw.start
 ; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_lshlrev_b32_e32 v2, 16, v1
-; GFX8-NEXT: v_max_f32_e32 v2, 4.0, v2
-; GFX8-NEXT: v_bfe_u32 v4, v2, 16, 1
-; GFX8-NEXT: v_add_u32_e32 v4, vcc, v4, v2
+; GFX8-NEXT: v_mov_b32_e32 v2, v1
+; GFX8-NEXT: v_lshlrev_b32_e32 v1, 16, v2
+; GFX8-NEXT: v_max_f32_e32 v1, 4.0, v1
+; GFX8-NEXT: v_bfe_u32 v4, v1, 16, 1
+; GFX8-NEXT: v_add_u32_e32 v4, vcc, v4, v1
 ; GFX8-NEXT: v_add_u32_e32 v4, vcc, 0x7fff, v4
-; GFX8-NEXT: v_or_b32_e32 v5, 0x400000, v2
-; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v2, v2
-; GFX8-NEXT: v_and_b32_e32 v3, 0xffff0000, v1
-; GFX8-NEXT: v_cndmask_b32_e32 v2, v4, v5, vcc
-; GFX8-NEXT: v_or_b32_sdwa v2, v3, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
-; GFX8-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 offset:65534
+; GFX8-NEXT: v_or_b32_e32 v5, 0x400000, v1
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
+; GFX8-NEXT: v_and_b32_e32 v3, 0xffff0000, v2
+; GFX8-NEXT: v_cndmask_b32_e32 v1, v4, v5, vcc
+; GFX8-NEXT: v_or_b32_sdwa v1, v3, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 offset:65534
 ; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
 ; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX8-NEXT: v_mov_b32_e32 v1, v2
 ; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
 ; GFX8-NEXT: s_cbranch_execnz .LBB19_1
 ; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -5491,17 +5503,17 @@ define void @local_atomic_fmax_noret_bf16__offset__align4(ptr addrspace(3) %ptr)
 ; GFX7-NEXT: .LBB19_1: ; %atomicrmw.start
 ; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_lshlrev_b32_e32 v2, 16, v1
-; GFX7-NEXT: v_mul_f32_e32 v2, 1.0, v2
-; GFX7-NEXT: v_max_f32_e32 v2, 4.0, v2
-; GFX7-NEXT: v_and_b32_e32 v3, 0xffff0000, v1
-; GFX7-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX7-NEXT: v_or_b32_e32 v2, v3, v2
-; GFX7-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 offset:65534
+; GFX7-NEXT: v_mov_b32_e32 v2, v1
+; GFX7-NEXT: v_lshlrev_b32_e32 v1, 16, v2
+; GFX7-NEXT: v_mul_f32_e32 v1, 1.0, v1
+; GFX7-NEXT: v_max_f32_e32 v1, 4.0, v1
+; GFX7-NEXT: v_and_b32_e32 v3, 0xffff0000, v2
+; GFX7-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX7-NEXT: v_or_b32_e32 v1, v3, v1
+; GFX7-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 offset:65534
 ; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
 ; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX7-NEXT: v_mov_b32_e32 v1, v2
 ; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5]
 ; GFX7-NEXT: s_cbranch_execnz .LBB19_1
 ; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -5518,17 +5530,17 @@ define void @local_atomic_fmax_noret_bf16__offset__align4(ptr addrspace(3) %ptr)
 ; GFX6-NEXT: .LBB19_1: ; %atomicrmw.start
 ; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_lshlrev_b32_e32 v2, 16, v1
-; GFX6-NEXT: v_mul_f32_e32 v2, 1.0, v2
-; GFX6-NEXT: v_max_f32_e32 v2, 4.0, v2
-; GFX6-NEXT: v_and_b32_e32 v3, 0xffff0000, v1
-; GFX6-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX6-NEXT: v_or_b32_e32 v2, v3, v2
-; GFX6-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2
+; GFX6-NEXT: v_mov_b32_e32 v2, v1
+; GFX6-NEXT: v_lshlrev_b32_e32 v1, 16, v2
+; GFX6-NEXT: v_mul_f32_e32 v1, 1.0, v1
+; GFX6-NEXT: v_max_f32_e32 v1, 4.0, v1
+; GFX6-NEXT: v_and_b32_e32 v3, 0xffff0000, v2
+; GFX6-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX6-NEXT: v_or_b32_e32 v1, v3, v1
+; GFX6-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1
 ; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
 ; GFX6-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX6-NEXT: v_mov_b32_e32 v1, v2
 ; GFX6-NEXT: s_andn2_b64 exec, exec, s[4:5]
 ; GFX6-NEXT: s_cbranch_execnz .LBB19_1
 ; GFX6-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -6101,15 +6113,15 @@ define void @local_atomic_fmax_noret_v2f16(ptr addrspace(3) %ptr, <2 x half> %va
 ; GFX12-NEXT: .LBB22_1: ; %atomicrmw.start
 ; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX12-NEXT: s_wait_dscnt 0x0
-; GFX12-NEXT: v_pk_max_num_f16 v3, v2, v2
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_pk_max_num_f16 v3, v3, v1
+; GFX12-NEXT: v_mov_b32_e32 v3, v2
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_pk_max_num_f16 v2, v3, v3
+; GFX12-NEXT: v_pk_max_num_f16 v2, v2, v1
 ; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v2
+; GFX12-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v3
 ; GFX12-NEXT: s_wait_dscnt 0x0
 ; GFX12-NEXT: global_inv scope:SCOPE_SE
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v2
-; GFX12-NEXT: v_mov_b32_e32 v2, v3
+; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
 ; GFX12-NEXT: s_wait_alu 0xfffe
 ; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
 ; GFX12-NEXT: s_wait_alu 0xfffe
@@ -6129,14 +6141,14 @@ define void @local_atomic_fmax_noret_v2f16(ptr addrspace(3) %ptr, <2 x half> %va
 ; GFX942-NEXT: .LBB22_1: ; %atomicrmw.start
 ; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_pk_max_f16 v3, v2, v2
+; GFX942-NEXT: v_mov_b32_e32 v3, v2
+; GFX942-NEXT: v_pk_max_f16 v2, v3, v3
 ; GFX942-NEXT: s_nop 0
-; GFX942-NEXT: v_pk_max_f16 v3, v3, v1
-; GFX942-NEXT: ds_cmpst_rtn_b32 v3, v0, v2, v3
+; GFX942-NEXT: v_pk_max_f16 v2, v2, v1
+; GFX942-NEXT: ds_cmpst_rtn_b32 v2, v0, v3, v2
 ; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v3, v2
+; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3
 ; GFX942-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
-; GFX942-NEXT: v_mov_b32_e32 v2, v3
 ; GFX942-NEXT: s_andn2_b64 exec, exec, s[0:1]
 ; GFX942-NEXT: s_cbranch_execnz .LBB22_1
 ; GFX942-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -6152,15 +6164,15 @@ define void @local_atomic_fmax_noret_v2f16(ptr addrspace(3) %ptr, <2 x half> %va
 ; GFX11-NEXT: .LBB22_1: ; %atomicrmw.start
 ; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: v_pk_max_f16 v3, v2, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_pk_max_f16 v3, v3, v1
+; GFX11-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_pk_max_f16 v2, v3, v3
+; GFX11-NEXT: v_pk_max_f16 v2, v2, v1
 ; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v2
+; GFX11-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v3
 ; GFX11-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v2
-; GFX11-NEXT: v_mov_b32_e32 v2, v3
+; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
 ; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
 ; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
 ; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -6178,14 +6190,14 @@ define void @local_atomic_fmax_noret_v2f16(ptr addrspace(3) %ptr, <2 x half> %va
 ; GFX10-NEXT: .LBB22_1: ; %atomicrmw.start
 ; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX10-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-NEXT: v_pk_max_f16 v3, v2, v2
-; GFX10-NEXT: v_pk_max_f16 v3, v3, v1
+; GFX10-NEXT: v_mov_b32_e32 v3, v2
+; GFX10-NEXT: v_pk_max_f16 v2, v3, v3
+; GFX10-NEXT: v_pk_max_f16 v2, v2, v1
 ; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: ds_cmpst_rtn_b32 v3, v0, v2, v3
+; GFX10-NEXT: ds_cmpst_rtn_b32 v2, v0, v3, v2
 ; GFX10-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v2
-; GFX10-NEXT: v_mov_b32_e32 v2, v3
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
 ; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4
 ; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4
 ; GFX10-NEXT: s_cbranch_execnz .LBB22_1
@@ -6202,13 +6214,13 @@ define void @local_atomic_fmax_noret_v2f16(ptr addrspace(3) %ptr, <2 x half> %va
 ; GFX90A-NEXT: .LBB22_1: ; %atomicrmw.start
 ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_pk_max_f16 v3, v2, v2
-; GFX90A-NEXT: v_pk_max_f16 v3, v3, v1
-; GFX90A-NEXT: ds_cmpst_rtn_b32 v3, v0, v2, v3
+; GFX90A-NEXT: v_mov_b32_e32 v3, v2
+; GFX90A-NEXT: v_pk_max_f16 v2, v3, v3
+; GFX90A-NEXT: v_pk_max_f16 v2, v2, v1
+; GFX90A-NEXT: ds_cmpst_rtn_b32 v2, v0, v3, v2
 ; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v3, v2
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3
 ; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX90A-NEXT: v_mov_b32_e32 v2, v3
 ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5]
 ; GFX90A-NEXT: s_cbranch_execnz .LBB22_1
 ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -6224,13 +6236,13 @@ define void @local_atomic_fmax_noret_v2f16(ptr addrspace(3) %ptr, <2 x half> %va
 ; GFX908-NEXT: .LBB22_1: ; %atomicrmw.start
 ; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_pk_max_f16 v3, v2, v2
-; GFX908-NEXT: v_pk_max_f16 v3, v3, v1
-; GFX908-NEXT: ds_cmpst_rtn_b32 v3, v0, v2, v3
+; GFX908-NEXT: v_mov_b32_e32 v3, v2
+; GFX908-NEXT: v_pk_max_f16 v2, v3, v3
+; GFX908-NEXT: v_pk_max_f16 v2, v2, v1
+; GFX908-NEXT: ds_cmpst_rtn_b32 v2, v0, v3, v2
 ; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v3, v2
+; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3
 ; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX908-NEXT: v_mov_b32_e32 v2, v3
 ; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5]
 ; GFX908-NEXT: s_cbranch_execnz .LBB22_1
 ; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -6248,16 +6260,16 @@ define void @local_atomic_fmax_noret_v2f16(ptr addrspace(3) %ptr, <2 x half> %va
 ; GFX8-NEXT: .LBB22_1: ; %atomicrmw.start
 ; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_max_f16_sdwa v4, v3, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
-; GFX8-NEXT: v_max_f16_e32 v5, v3, v3
-; GFX8-NEXT: v_max_f16_sdwa v4, v4, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX8-NEXT: v_mov_b32_e32 v4, v3
+; GFX8-NEXT: v_max_f16_sdwa v3, v4, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT: v_max_f16_e32 v5, v4, v4
+; GFX8-NEXT: v_max_f16_sdwa v3, v3, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
 ; GFX8-NEXT: v_max_f16_e32 v5, v5, v1
-; GFX8-NEXT: v_or_b32_e32 v4, v5, v4
-; GFX8-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX8-NEXT: v_or_b32_e32 v3, v5, v3
+; GFX8-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
 ; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
 ; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX8-NEXT: v_mov_b32_e32 v3, v4
 ; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
 ; GFX8-NEXT: s_cbranch_execnz .LBB22_1
 ; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -6363,15 +6375,15 @@ define void @local_atomic_fmax_noret_v2f16__offset(ptr addrspace(3) %ptr, <2 x h
 ; GFX12-NEXT: .LBB23_1: ; %atomicrmw.start
 ; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX12-NEXT: s_wait_dscnt 0x0
-; GFX12-NEXT: v_pk_max_num_f16 v3, v2, v2
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_pk_max_num_f16 v3, v3, v1
+; GFX12-NEXT: v_mov_b32_e32 v3, v2
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_pk_max_num_f16 v2, v3, v3
+; GFX12-NEXT: v_pk_max_num_f16 v2, v2, v1
 ; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v2 offset:65532
+; GFX12-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v3 offset:65532
 ; GFX12-NEXT: s_wait_dscnt 0x0
 ; GFX12-NEXT: global_inv scope:SCOPE_SE
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v2
-; GFX12-NEXT: v_mov_b32_e32 v2, v3
+; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
 ; GFX12-NEXT: s_wait_alu 0xfffe
 ; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
 ; GFX12-NEXT: s_wait_alu 0xfffe
@@ -6391,14 +6403,14 @@ define void @local_atomic_fmax_noret_v2f16__offset(ptr addrspace(3) %ptr, <2 x h
 ; GFX942-NEXT: .LBB23_1: ; %atomicrmw.start
 ; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_pk_max_f16 v3, v2, v2
+; GFX942-NEXT: v_mov_b32_e32 v3, v2
+; GFX942-NEXT: v_pk_max_f16 v2, v3, v3
 ; GFX942-NEXT: s_nop 0
-; GFX942-NEXT: v_pk_max_f16 v3, v3, v1
-; GFX942-NEXT: ds_cmpst_rtn_b32 v3, v0, v2, v3 offset:65532
+; GFX942-NEXT: v_pk_max_f16 v2, v2, v1
+; GFX942-NEXT: ds_cmpst_rtn_b32 v2, v0, v3, v2 offset:65532
 ; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v3, v2
+; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3
 ; GFX942-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
-; GFX942-NEXT: v_mov_b32_e32 v2, v3
 ; GFX942-NEXT: s_andn2_b64 exec, exec, s[0:1]
 ; GFX942-NEXT: s_cbranch_execnz .LBB23_1
 ; GFX942-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -6414,15 +6426,15 @@ define void @local_atomic_fmax_noret_v2f16__offset(ptr addrspace(3) %ptr, <2 x h
 ; GFX11-NEXT: .LBB23_1: ; %atomicrmw.start
 ; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: v_pk_max_f16 v3, v2, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_pk_max_f16 v3, v3, v1
+; GFX11-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_pk_max_f16 v2, v3, v3
+; GFX11-NEXT: v_pk_max_f16 v2, v2, v1
 ; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v2 offset:65532
+; GFX11-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v3 offset:65532
 ; GFX11-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v2
-; GFX11-NEXT: v_mov_b32_e32 v2, v3
+; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
 ; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
 ; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
 ; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -6440,14 +6452,14 @@ define void @local_atomic_fmax_noret_v2f16__offset(ptr addrspace(3) %ptr, <2 x h
 ; GFX10-NEXT: .LBB23_1: ; %atomicrmw.start
 ; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX10-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-NEXT: v_pk_max_f16 v3, v2, v2
-; GFX10-NEXT: v_pk_max_f16 v3, v3, v1
+; GFX10-NEXT: v_mov_b32_e32 v3, v2
+; GFX10-NEXT: v_pk_max_f16 v2, v3, v3
+; GFX10-NEXT: v_pk_max_f16 v2, v2, v1
 ; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: ds_cmpst_rtn_b32 v3, v0, v2, v3 offset:65532
+; GFX10-NEXT: ds_cmpst_rtn_b32 v2, v0, v3, v2 offset:65532
 ; GFX10-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v2
-; GFX10-NEXT: v_mov_b32_e32 v2, v3
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
 ; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4
 ; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4
 ; GFX10-NEXT: s_cbranch_execnz .LBB23_1
@@ -6464,13 +6476,13 @@ define void @local_atomic_fmax_noret_v2f16__offset(ptr addrspace(3) %ptr, <2 x h
 ; GFX90A-NEXT: .LBB23_1: ; %atomicrmw.start
 ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_pk_max_f16 v3, v2, v2
-; GFX90A-NEXT: v_pk_max_f16 v3, v3, v1
-; GFX90A-NEXT: ds_cmpst_rtn_b32 v3, v0, v2, v3 offset:65532
+; GFX90A-NEXT: v_mov_b32_e32 v3, v2
+; GFX90A-NEXT: v_pk_max_f16 v2, v3, v3
+; GFX90A-NEXT: v_pk_max_f16 v2, v2, v1
+; GFX90A-NEXT: ds_cmpst_rtn_b32 v2, v0, v3, v2 offset:65532
 ; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v3, v2
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3
 ; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX90A-NEXT: v_mov_b32_e32 v2, v3
 ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5]
 ; GFX90A-NEXT: s_cbranch_execnz .LBB23_1
 ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -6486,13 +6498,13 @@ define void @local_atomic_fmax_noret_v2f16__offset(ptr addrspace(3) %ptr, <2 x h
 ; GFX908-NEXT: .LBB23_1: ; %atomicrmw.start
 ; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_pk_max_f16 v3, v2, v2
-; GFX908-NEXT: v_pk_max_f16 v3, v3, v1
-; GFX908-NEXT: ds_cmpst_rtn_b32 v3, v0, v2, v3 offset:65532
+; GFX908-NEXT: v_mov_b32_e32 v3, v2
+; GFX908-NEXT: v_pk_max_f16 v2, v3, v3
+; GFX908-NEXT: v_pk_max_f16 v2, v2, v1
+; GFX908-NEXT: ds_cmpst_rtn_b32 v2, v0, v3, v2 offset:65532
 ; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v3, v2
+; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3
 ; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX908-NEXT: v_mov_b32_e32 v2, v3
 ; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5]
 ; GFX908-NEXT: s_cbranch_execnz .LBB23_1
 ; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -6510,16 +6522,16 @@ define void @local_atomic_fmax_noret_v2f16__offset(ptr addrspace(3) %ptr, <2 x h
 ; GFX8-NEXT: .LBB23_1: ; %atomicrmw.start
 ; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_max_f16_sdwa v4, v3, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
-; GFX8-NEXT: v_max_f16_e32 v5, v3, v3
-; GFX8-NEXT: v_max_f16_sdwa v4, v4, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX8-NEXT: v_mov_b32_e32 v4, v3
+; GFX8-NEXT: v_max_f16_sdwa v3, v4, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT: v_max_f16_e32 v5, v4, v4
+; GFX8-NEXT: v_max_f16_sdwa v3, v3, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
 ; GFX8-NEXT: v_max_f16_e32 v5, v5, v1
-; GFX8-NEXT: v_or_b32_e32 v4, v5, v4
-; GFX8-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4 offset:65532
+; GFX8-NEXT: v_or_b32_e32 v3, v5, v3
+; GFX8-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3 offset:65532
 ; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
 ; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX8-NEXT: v_mov_b32_e32 v3, v4
 ; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
 ; GFX8-NEXT: s_cbranch_execnz .LBB23_1
 ; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -7589,31 +7601,34 @@ define void @local_atomic_fmax_noret_v2bf16(ptr addrspace(3) %ptr, <2 x bfloat>
 ; GFX12-TRUE16-NEXT: .LBB26_1: ; %atomicrmw.start
 ; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
-; GFX12-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v4, v3
 ; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-TRUE16-NEXT: v_dual_max_num_f32 v5, v5, v2 :: v_dual_lshlrev_b32 v4, 16, v3
-; GFX12-TRUE16-NEXT: v_max_num_f32_e32 v4, v4, v1
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX12-TRUE16-NEXT: v_max_num_f32_e32 v5, v5, v2
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v4
 ; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
 ; GFX12-TRUE16-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX12-TRUE16-NEXT: v_bfe_u32 v6, v4, 16, 1
-; GFX12-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v4
-; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX12-TRUE16-NEXT: v_max_num_f32_e32 v3, v3, v1
 ; GFX12-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
 ; GFX12-TRUE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX12-TRUE16-NEXT: v_add3_u32 v6, v6, v4, 0x7fff
+; GFX12-TRUE16-NEXT: v_bfe_u32 v6, v3, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v3
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_add3_u32 v6, v6, v3, 0x7fff
 ; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
-; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_3)
-; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v4, v6, v8, vcc_lo
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v3, v6, v8, vcc_lo
 ; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
 ; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
 ; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
-; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.l, v4.h
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.l, v3.h
 ; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
-; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v5, v3
+; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v5, v4
 ; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
 ; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SE
-; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX12-TRUE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
 ; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
 ; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
 ; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
@@ -7638,32 +7653,33 @@ define void @local_atomic_fmax_noret_v2bf16(ptr addrspace(3) %ptr, <2 x bfloat>
 ; GFX12-FAKE16-NEXT: .LBB26_1: ; %atomicrmw.start
 ; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
-; GFX12-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v4, v3
 ; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_dual_max_num_f32 v5, v5, v1 :: v_dual_lshlrev_b32 v4, 16, v3
-; GFX12-FAKE16-NEXT: v_max_num_f32_e32 v4, v4, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX12-FAKE16-NEXT: v_max_num_f32_e32 v5, v5, v1
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v4
 ; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
 ; GFX12-FAKE16-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX12-FAKE16-NEXT: v_bfe_u32 v6, v4, 16, 1
-; GFX12-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v4
+; GFX12-FAKE16-NEXT: v_max_num_f32_e32 v3, v3, v2
 ; GFX12-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
 ; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
 ; GFX12-FAKE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX12-FAKE16-NEXT: v_add3_u32 v6, v6, v4, 0x7fff
-; GFX12-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v4, v4
+; GFX12-FAKE16-NEXT: v_bfe_u32 v6, v3, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v3
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v3, v3
 ; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2)
 ; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
+; GFX12-FAKE16-NEXT: v_add3_u32 v6, v6, v3, 0x7fff
 ; GFX12-FAKE16-NEXT: s_wait_alu 0xf1ff
-; GFX12-FAKE16-NEXT: v_cndmask_b32_e64 v4, v6, v8, s0
-; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_perm_b32 v4, v5, v4, 0x7060302
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e64 v3, v6, v8, s0
+; GFX12-FAKE16-NEXT: v_perm_b32 v3, v5, v3, 0x7060302
 ; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
-; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3
+; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4
 ; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
 ; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SE
-; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX12-FAKE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
 ; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
 ; GFX12-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
 ; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
@@ -7686,27 +7702,27 @@ define void @local_atomic_fmax_noret_v2bf16(ptr addrspace(3) %ptr, <2 x bfloat>
 ; GFX942-NEXT: .LBB26_1: ; %atomicrmw.start
 ; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_lshlrev_b32_e32 v4, 16, v3
-; GFX942-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
-; GFX942-NEXT: v_max_f32_e32 v4, v4, v2
+; GFX942-NEXT: v_mov_b32_e32 v4, v3
+; GFX942-NEXT: v_lshlrev_b32_e32 v3, 16, v4
+; GFX942-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX942-NEXT: v_max_f32_e32 v3, v3, v2
 ; GFX942-NEXT: v_max_f32_e32 v5, v5, v1
-; GFX942-NEXT: v_bfe_u32 v6, v4, 16, 1
+; GFX942-NEXT: v_bfe_u32 v6, v3, 16, 1
 ; GFX942-NEXT: v_bfe_u32 v8, v5, 16, 1
-; GFX942-NEXT: v_or_b32_e32 v7, 0x400000, v4
+; GFX942-NEXT: v_or_b32_e32 v7, 0x400000, v3
 ; GFX942-NEXT: v_or_b32_e32 v9, 0x400000, v5
-; GFX942-NEXT: v_add3_u32 v6, v6, v4, s4
+; GFX942-NEXT: v_add3_u32 v6, v6, v3, s4
 ; GFX942-NEXT: v_add3_u32 v8, v8, v5, s4
 ; GFX942-NEXT: v_cmp_u_f32_e32 vcc, v5, v5
-; GFX942-NEXT: v_cmp_u_f32_e64 s[0:1], v4, v4
+; GFX942-NEXT: v_cmp_u_f32_e64 s[0:1], v3, v3
 ; GFX942-NEXT: s_nop 0
 ; GFX942-NEXT: v_cndmask_b32_e32 v5, v8, v9, vcc
-; GFX942-NEXT: v_cndmask_b32_e64 v4, v6, v7, s[0:1]
-; GFX942-NEXT: v_perm_b32 v4, v5, v4, s5
-; GFX942-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX942-NEXT: v_cndmask_b32_e64 v3, v6, v7, s[0:1]
+; GFX942-NEXT: v_perm_b32 v3, v5, v3, s5
+; GFX942-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
 ; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
 ; GFX942-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
-; GFX942-NEXT: v_mov_b32_e32 v3, v4
 ; GFX942-NEXT: s_andn2_b64 exec, exec, s[2:3]
 ; GFX942-NEXT: s_cbranch_execnz .LBB26_1
 ; GFX942-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -7724,30 +7740,32 @@ define void @local_atomic_fmax_noret_v2bf16(ptr addrspace(3) %ptr, <2 x bfloat>
 ; GFX11-TRUE16-NEXT: .LBB26_1: ; %atomicrmw.start
 ; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v3
 ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_dual_max_f32 v5, v5, v2 :: v_dual_lshlrev_b32 v4, 16, v3
-; GFX11-TRUE16-NEXT: v_max_f32_e32 v4, v4, v1
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX11-TRUE16-NEXT: v_max_f32_e32 v5, v5, v2
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v4
 ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
 ; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v4, 16, 1
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v4
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: v_max_f32_e32 v3, v3, v1
 ; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
 ; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX11-TRUE16-NEXT: v_add3_u32 v6, v6, v4, 0x7fff
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_4)
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v4, v6, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add3_u32 v6, v6, v3, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v6, v8, vcc_lo
 ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
 ; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
 ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v4.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v3.h
 ; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v5, v3
+; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v5, v4
 ; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX11-TRUE16-NEXT: buffer_gl0_inv
-; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
 ; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
 ; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
 ; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -7768,30 +7786,32 @@ define void @local_atomic_fmax_noret_v2bf16(ptr addrspace(3) %ptr, <2 x bfloat>
 ; GFX11-FAKE16-NEXT: .LBB26_1: ; %atomicrmw.start
 ; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v3
 ; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_dual_max_f32 v5, v5, v1 :: v_dual_lshlrev_b32 v4, 16, v3
-; GFX11-FAKE16-NEXT: v_max_f32_e32 v4, v4, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX11-FAKE16-NEXT: v_max_f32_e32 v5, v5, v1
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v4
 ; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
 ; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX11-FAKE16-NEXT: v_bfe_u32 v6, v4, 16, 1
-; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v4
+; GFX11-FAKE16-NEXT: v_max_f32_e32 v3, v3, v2
 ; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
 ; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
 ; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX11-FAKE16-NEXT: v_add3_u32 v6, v6, v4, 0x7fff
-; GFX11-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v4, v4
-; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v6, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v3, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
 ; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
-; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v4, v6, v8, s0
-; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_perm_b32 v4, v5, v4, 0x7060302
+; GFX11-FAKE16-NEXT: v_add3_u32 v6, v6, v3, 0x7fff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v3, v6, v8, s0
+; GFX11-FAKE16-NEXT: v_perm_b32 v3, v5, v3, 0x7060302
 ; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3
+; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4
 ; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX11-FAKE16-NEXT: buffer_gl0_inv
-; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
 ; GFX11-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
 ; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
 ; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
@@ -7811,27 +7831,27 @@ define void @local_atomic_fmax_noret_v2bf16(ptr addrspace(3) %ptr, <2 x bfloat>
 ; GFX10-NEXT: .LBB26_1: ; %atomicrmw.start
 ; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX10-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-NEXT: v_lshlrev_b32_e32 v4, 16, v3
-; GFX10-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
-; GFX10-NEXT: v_max_f32_e32 v4, v4, v2
+; GFX10-NEXT: v_mov_b32_e32 v4, v3
+; GFX10-NEXT: v_lshlrev_b32_e32 v3, 16, v4
+; GFX10-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX10-NEXT: v_max_f32_e32 v3, v3, v2
 ; GFX10-NEXT: v_max_f32_e32 v5, v5, v1
-; GFX10-NEXT: v_bfe_u32 v6, v4, 16, 1
+; GFX10-NEXT: v_bfe_u32 v6, v3, 16, 1
 ; GFX10-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX10-NEXT: v_or_b32_e32 v8, 0x400000, v4
+; GFX10-NEXT: v_or_b32_e32 v8, 0x400000, v3
 ; GFX10-NEXT: v_or_b32_e32 v9, 0x400000, v5
 ; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX10-NEXT: v_add3_u32 v6, v6, v4, 0x7fff
+; GFX10-NEXT: v_add3_u32 v6, v6, v3, 0x7fff
 ; GFX10-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX10-NEXT: v_cmp_u_f32_e64 s4, v4, v4
+; GFX10-NEXT: v_cmp_u_f32_e64 s4, v3, v3
 ; GFX10-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
-; GFX10-NEXT: v_cndmask_b32_e64 v4, v6, v8, s4
-; GFX10-NEXT: v_perm_b32 v4, v5, v4, 0x7060302
+; GFX10-NEXT: v_cndmask_b32_e64 v3, v6, v8, s4
+; GFX10-NEXT: v_perm_b32 v3, v5, v3, 0x7060302
 ; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX10-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
 ; GFX10-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX10-NEXT: v_mov_b32_e32 v3, v4
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
 ; GFX10-NEXT: s_or_b32 s5, vcc_lo, s5
 ; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s5
 ; GFX10-NEXT: s_cbranch_execnz .LBB26_1
@@ -7851,26 +7871,26 @@ define void @local_atomic_fmax_noret_v2bf16(ptr addrspace(3) %ptr, <2 x bfloat>
 ; GFX90A-NEXT: .LBB26_1: ; %atomicrmw.start
 ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_lshlrev_b32_e32 v4, 16, v3
-; GFX90A-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
-; GFX90A-NEXT: v_max_f32_e32 v4, v4, v2
+; GFX90A-NEXT: v_mov_b32_e32 v4, v3
+; GFX90A-NEXT: v_lshlrev_b32_e32 v3, 16, v4
+; GFX90A-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX90A-NEXT: v_max_f32_e32 v3, v3, v2
 ; GFX90A-NEXT: v_max_f32_e32 v5, v5, v1
-; GFX90A-NEXT: v_bfe_u32 v6, v4, 16, 1
+; GFX90A-NEXT: v_bfe_u32 v6, v3, 16, 1
 ; GFX90A-NEXT: v_bfe_u32 v8, v5, 16, 1
-; GFX90A-NEXT: v_or_b32_e32 v7, 0x400000, v4
+; GFX90A-NEXT: v_or_b32_e32 v7, 0x400000, v3
 ; GFX90A-NEXT: v_or_b32_e32 v9, 0x400000, v5
-; GFX90A-NEXT: v_add3_u32 v6, v6, v4, s8
+; GFX90A-NEXT: v_add3_u32 v6, v6, v3, s8
 ; GFX90A-NEXT: v_add3_u32 v8, v8, v5, s8
 ; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v5, v5
-; GFX90A-NEXT: v_cmp_u_f32_e64 s[4:5], v4, v4
-; GFX90A-NEXT: v_cndmask_b32_e64 v4, v6, v7, s[4:5]
+; GFX90A-NEXT: v_cmp_u_f32_e64 s[4:5], v3, v3
+; GFX90A-NEXT: v_cndmask_b32_e64 v3, v6, v7, s[4:5]
 ; GFX90A-NEXT: v_cndmask_b32_e32 v5, v8, v9, vcc
-; GFX90A-NEXT: v_perm_b32 v4, v5, v4, s9
-; GFX90A-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX90A-NEXT: v_perm_b32 v3, v5, v3, s9
+; GFX90A-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
 ; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
 ; GFX90A-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GFX90A-NEXT: v_mov_b32_e32 v3, v4
 ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[6:7]
 ; GFX90A-NEXT: s_cbranch_execnz .LBB26_1
 ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -7889,26 +7909,26 @@ define void @local_atomic_fmax_noret_v2bf16(ptr addrspace(3) %ptr, <2 x bfloat>
 ; GFX908-NEXT: .LBB26_1: ; %atomicrmw.start
 ; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_lshlrev_b32_e32 v4, 16, v3
-; GFX908-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
-; GFX908-NEXT: v_max_f32_e32 v4, v4, v2
+; GFX908-NEXT: v_mov_b32_e32 v4, v3
+; GFX908-NEXT: v_lshlrev_b32_e32 v3, 16, v4
+; GFX908-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX908-NEXT: v_max_f32_e32 v3, v3, v2
 ; GFX908-NEXT: v_max_f32_e32 v5, v5, v1
-; GFX908-NEXT: v_bfe_u32 v6, v4, 16, 1
+; GFX908-NEXT: v_bfe_u32 v6, v3, 16, 1
 ; GFX908-NEXT: v_bfe_u32 v8, v5, 16, 1
-; GFX908-NEXT: v_or_b32_e32 v7, 0x400000, v4
+; GFX908-NEXT: v_or_b32_e32 v7, 0x400000, v3
 ; GFX908-NEXT: v_or_b32_e32 v9, 0x400000, v5
-; GFX908-NEXT: v_add3_u32 v6, v6, v4, s8
+; GFX908-NEXT: v_add3_u32 v6, v6, v3, s8
 ; GFX908-NEXT: v_add3_u32 v8, v8, v5, s8
 ; GFX908-NEXT: v_cmp_u_f32_e32 vcc, v5, v5
-; GFX908-NEXT: v_cmp_u_f32_e64 s[4:5], v4, v4
-; GFX908-NEXT: v_cndmask_b32_e64 v4, v6, v7, s[4:5]
+; GFX908-NEXT: v_cmp_u_f32_e64 s[4:5], v3, v3
+; GFX908-NEXT: v_cndmask_b32_e64 v3, v6, v7, s[4:5]
 ; GFX908-NEXT: v_cndmask_b32_e32 v5, v8, v9, vcc
-; GFX908-NEXT: v_perm_b32 v4, v5, v4, s9
-; GFX908-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX908-NEXT: v_perm_b32 v3, v5, v3, s9
+; GFX908-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
 ; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
 ; GFX908-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GFX908-NEXT: v_mov_b32_e32 v3, v4
 ; GFX908-NEXT: s_andn2_b64 exec, exec, s[6:7]
 ; GFX908-NEXT: s_cbranch_execnz .LBB26_1
 ; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -7926,29 +7946,29 @@ define void @local_atomic_fmax_noret_v2bf16(ptr addrspace(3) %ptr, <2 x bfloat>
 ; GFX8-NEXT: .LBB26_1: ; %atomicrmw.start
 ; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_lshlrev_b32_e32 v4, 16, v3
-; GFX8-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
-; GFX8-NEXT: v_max_f32_e32 v4, v4, v2
+; GFX8-NEXT: v_mov_b32_e32 v4, v3
+; GFX8-NEXT: v_lshlrev_b32_e32 v3, 16, v4
+; GFX8-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX8-NEXT: v_max_f32_e32 v3, v3, v2
 ; GFX8-NEXT: v_max_f32_e32 v5, v5, v1
-; GFX8-NEXT: v_bfe_u32 v6, v4, 16, 1
+; GFX8-NEXT: v_bfe_u32 v6, v3, 16, 1
 ; GFX8-NEXT: v_bfe_u32 v8, v5, 16, 1
-; GFX8-NEXT: v_add_u32_e32 v6, vcc, v6, v4
+; GFX8-NEXT: v_add_u32_e32 v6, vcc, v6, v3
 ; GFX8-NEXT: v_add_u32_e32 v8, vcc, v8, v5
 ; GFX8-NEXT: v_add_u32_e32 v6, vcc, 0x7fff, v6
 ; GFX8-NEXT: v_add_u32_e32 v8, vcc, 0x7fff, v8
 ; GFX8-NEXT: v_or_b32_e32 v9, 0x400000, v5
 ; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v5, v5
-; GFX8-NEXT: v_or_b32_e32 v7, 0x400000, v4
-; GFX8-NEXT: v_cmp_u_f32_e64 s[4:5], v4, v4
+; GFX8-NEXT: v_or_b32_e32 v7, 0x400000, v3
+; GFX8-NEXT: v_cmp_u_f32_e64 s[4:5], v3, v3
 ; GFX8-NEXT: v_cndmask_b32_e32 v5, v8, v9, vcc
-; GFX8-NEXT: v_cndmask_b32_e64 v4, v6, v7, s[4:5]
+; GFX8-NEXT: v_cndmask_b32_e64 v3, v6, v7, s[4:5]
 ; GFX8-NEXT: v_lshrrev_b32_e32 v5, 16, v5
-; GFX8-NEXT: v_alignbit_b32 v4, v5, v4, 16
-; GFX8-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX8-NEXT: v_alignbit_b32 v3, v5, v3, 16
+; GFX8-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
 ; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
 ; GFX8-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GFX8-NEXT: v_mov_b32_e32 v3, v4
 ; GFX8-NEXT: s_andn2_b64 exec, exec, s[6:7]
 ; GFX8-NEXT: s_cbranch_execnz .LBB26_1
 ; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -8047,31 +8067,34 @@ define void @local_atomic_fmax_noret_v2bf16__ofset(ptr addrspace(3) %ptr, <2 x b
 ; GFX12-TRUE16-NEXT: .LBB27_1: ; %atomicrmw.start
 ; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
-; GFX12-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v4, v3
 ; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-TRUE16-NEXT: v_dual_max_num_f32 v5, v5, v2 :: v_dual_lshlrev_b32 v4, 16, v3
-; GFX12-TRUE16-NEXT: v_max_num_f32_e32 v4, v4, v1
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX12-TRUE16-NEXT: v_max_num_f32_e32 v5, v5, v2
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v4
 ; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
 ; GFX12-TRUE16-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX12-TRUE16-NEXT: v_bfe_u32 v6, v4, 16, 1
-; GFX12-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v4
-; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX12-TRUE16-NEXT: v_max_num_f32_e32 v3, v3, v1
 ; GFX12-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
 ; GFX12-TRUE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX12-TRUE16-NEXT: v_add3_u32 v6, v6, v4, 0x7fff
+; GFX12-TRUE16-NEXT: v_bfe_u32 v6, v3, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v3
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_add3_u32 v6, v6, v3, 0x7fff
 ; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
-; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_3)
-; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v4, v6, v8, vcc_lo
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v3, v6, v8, vcc_lo
 ; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
 ; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
 ; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
-; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.l, v4.h
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.l, v3.h
 ; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
-; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v5, v3 offset:65532
+; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v5, v4 offset:65532
 ; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
 ; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SE
-; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX12-TRUE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
 ; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
 ; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
 ; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
@@ -8096,32 +8119,33 @@ define void @local_atomic_fmax_noret_v2bf16__ofset(ptr addrspace(3) %ptr, <2 x b
 ; GFX12-FAKE16-NEXT: .LBB27_1: ; %atomicrmw.start
 ; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
-; GFX12-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v4, v3
 ; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_dual_max_num_f32 v5, v5, v1 :: v_dual_lshlrev_b32 v4, 16, v3
-; GFX12-FAKE16-NEXT: v_max_num_f32_e32 v4, v4, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX12-FAKE16-NEXT: v_max_num_f32_e32 v5, v5, v1
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v4
 ; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
 ; GFX12-FAKE16-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX12-FAKE16-NEXT: v_bfe_u32 v6, v4, 16, 1
-; GFX12-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v4
+; GFX12-FAKE16-NEXT: v_max_num_f32_e32 v3, v3, v2
 ; GFX12-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
 ; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
 ; GFX12-FAKE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX12-FAKE16-NEXT: v_add3_u32 v6, v6, v4, 0x7fff
-; GFX12-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v4, v4
+; GFX12-FAKE16-NEXT: v_bfe_u32 v6, v3, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v3
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v3, v3
 ; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2)
 ; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
+; GFX12-FAKE16-NEXT: v_add3_u32 v6, v6, v3, 0x7fff
 ; GFX12-FAKE16-NEXT: s_wait_alu 0xf1ff
-; GFX12-FAKE16-NEXT: v_cndmask_b32_e64 v4, v6, v8, s0
-; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_perm_b32 v4, v5, v4, 0x7060302
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e64 v3, v6, v8, s0
+; GFX12-FAKE16-NEXT: v_perm_b32 v3, v5, v3, 0x7060302
 ; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
-; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3 offset:65532
+; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4 offset:65532
 ; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
 ; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SE
-; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX12-FAKE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
 ; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
 ; GFX12-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
 ; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
@@ -8144,27 +8168,27 @@ define void @local_atomic_fmax_noret_v2bf16__ofset(ptr addrspace(3) %ptr, <2 x b
 ; GFX942-NEXT: .LBB27_1: ; %atomicrmw.start
 ; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_lshlrev_b32_e32 v4, 16, v3
-; GFX942-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
-; GFX942-NEXT: v_max_f32_e32 v4, v4, v2
+; GFX942-NEXT: v_mov_b32_e32 v4, v3
+; GFX942-NEXT: v_lshlrev_b32_e32 v3, 16, v4
+; GFX942-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX942-NEXT: v_max_f32_e32 v3, v3, v2
 ; GFX942-NEXT: v_max_f32_e32 v5, v5, v1
-; GFX942-NEXT: v_bfe_u32 v6, v4, 16, 1
+; GFX942-NEXT: v_bfe_u32 v6, v3, 16, 1
 ; GFX942-NEXT: v_bfe_u32 v8, v5, 16, 1
-; GFX942-NEXT: v_or_b32_e32 v7, 0x400000, v4
+; GFX942-NEXT: v_or_b32_e32 v7, 0x400000, v3
 ; GFX942-NEXT: v_or_b32_e32 v9, 0x400000, v5
-; GFX942-NEXT: v_add3_u32 v6, v6, v4, s4
+; GFX942-NEXT: v_add3_u32 v6, v6, v3, s4
 ; GFX942-NEXT: v_add3_u32 v8, v8, v5, s4
 ; GFX942-NEXT: v_cmp_u_f32_e32 vcc, v5, v5
-; GFX942-NEXT: v_cmp_u_f32_e64 s[0:1], v4, v4
+; GFX942-NEXT: v_cmp_u_f32_e64 s[0:1], v3, v3
 ; GFX942-NEXT: s_nop 0
 ; GFX942-NEXT: v_cndmask_b32_e32 v5, v8, v9, vcc
-; GFX942-NEXT: v_cndmask_b32_e64 v4, v6, v7, s[0:1]
-; GFX942-NEXT: v_perm_b32 v4, v5, v4, s5
-; GFX942-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4 offset:65532
+; GFX942-NEXT: v_cndmask_b32_e64 v3, v6, v7, s[0:1]
+; GFX942-NEXT: v_perm_b32 v3, v5, v3, s5
+; GFX942-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3 offset:65532
 ; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
 ; GFX942-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
-; GFX942-NEXT: v_mov_b32_e32 v3, v4
 ; GFX942-NEXT: s_andn2_b64 exec, exec, s[2:3]
 ; GFX942-NEXT: s_cbranch_execnz .LBB27_1
 ; GFX942-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -8182,30 +8206,32 @@ define void @local_atomic_fmax_noret_v2bf16__ofset(ptr addrspace(3) %ptr, <2 x b
 ; GFX11-TRUE16-NEXT: .LBB27_1: ; %atomicrmw.start
 ; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v3
 ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_dual_max_f32 v5, v5, v2 :: v_dual_lshlrev_b32 v4, 16, v3
-; GFX11-TRUE16-NEXT: v_max_f32_e32 v4, v4, v1
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX11-TRUE16-NEXT: v_max_f32_e32 v5, v5, v2
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v4
 ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
 ; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v4, 16, 1
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v4
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: v_max_f32_e32 v3, v3, v1
 ; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
 ; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX11-TRUE16-NEXT: v_add3_u32 v6, v6, v4, 0x7fff
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_4)
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v4, v6, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add3_u32 v6, v6, v3, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v6, v8, vcc_lo
 ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
 ; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
 ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v4.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v3.h
 ; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v5, v3 offset:65532
+; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v5, v4 offset:65532
 ; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX11-TRUE16-NEXT: buffer_gl0_inv
-; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
 ; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
 ; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
 ; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -8226,30 +8252,32 @@ define void @local_atomic_fmax_noret_v2bf16__ofset(ptr addrspace(3) %ptr, <2 x b
 ; GFX11-FAKE16-NEXT: .LBB27_1: ; %atomicrmw.start
 ; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v3
 ; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_dual_max_f32 v5, v5, v1 :: v_dual_lshlrev_b32 v4, 16, v3
-; GFX11-FAKE16-NEXT: v_max_f32_e32 v4, v4, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX11-FAKE16-NEXT: v_max_f32_e32 v5, v5, v1
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v4
 ; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
 ; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX11-FAKE16-NEXT: v_bfe_u32 v6, v4, 16, 1
-; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v4
+; GFX11-FAKE16-NEXT: v_max_f32_e32 v3, v3, v2
 ; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
 ; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
 ; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX11-FAKE16-NEXT: v_add3_u32 v6, v6, v4, 0x7fff
-; GFX11-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v4, v4
-; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v6, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v3, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
 ; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
-; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v4, v6, v8, s0
-; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_perm_b32 v4, v5, v4, 0x7060302
+; GFX11-FAKE16-NEXT: v_add3_u32 v6, v6, v3, 0x7fff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v3, v6, v8, s0
+; GFX11-FAKE16-NEXT: v_perm_b32 v3, v5, v3, 0x7060302
 ; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3 offset:65532
+; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4 offset:65532
 ; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX11-FAKE16-NEXT: buffer_gl0_inv
-; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
 ; GFX11-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
 ; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
 ; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
@@ -8269,27 +8297,27 @@ define void @local_atomic_fmax_noret_v2bf16__ofset(ptr addrspace(3) %ptr, <2 x b
 ; GFX10-NEXT: .LBB27_1: ; %atomicrmw.start
 ; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX10-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-NEXT: v_lshlrev_b32_e32 v4, 16, v3
-; GFX10-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
-; GFX10-NEXT: v_max_f32_e32 v4, v4, v2
+; GFX10-NEXT: v_mov_b32_e32 v4, v3
+; GFX10-NEXT: v_lshlrev_b32_e32 v3, 16, v4
+; GFX10-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX10-NEXT: v_max_f32_e32 v3, v3, v2
 ; GFX10-NEXT: v_max_f32_e32 v5, v5, v1
-; GFX10-NEXT: v_bfe_u32 v6, v4, 16, 1
+; GFX10-NEXT: v_bfe_u32 v6, v3, 16, 1
 ; GFX10-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX10-NEXT: v_or_b32_e32 v8, 0x400000, v4
+; GFX10-NEXT: v_or_b32_e32 v8, 0x400000, v3
 ; GFX10-NEXT: v_or_b32_e32 v9, 0x400000, v5
 ; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX10-NEXT: v_add3_u32 v6, v6, v4, 0x7fff
+; GFX10-NEXT: v_add3_u32 v6, v6, v3, 0x7fff
 ; GFX10-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX10-NEXT: v_cmp_u_f32_e64 s4, v4, v4
+; GFX10-NEXT: v_cmp_u_f32_e64 s4, v3, v3
 ; GFX10-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
-; GFX10-NEXT: v_cndmask_b32_e64 v4, v6, v8, s4
-; GFX10-NEXT: v_perm_b32 v4, v5, v4, 0x7060302
+; GFX10-NEXT: v_cndmask_b32_e64 v3, v6, v8, s4
+; GFX10-NEXT: v_perm_b32 v3, v5, v3, 0x7060302
 ; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4 offset:65532
+; GFX10-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3 offset:65532
 ; GFX10-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX10-NEXT: v_mov_b32_e32 v3, v4
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
 ; GFX10-NEXT: s_or_b32 s5, vcc_lo, s5
 ; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s5
 ; GFX10-NEXT: s_cbranch_execnz .LBB27_1
@@ -8309,26 +8337,26 @@ define void @local_atomic_fmax_noret_v2bf16__ofset(ptr addrspace(3) %ptr, <2 x b
 ; GFX90A-NEXT: .LBB27_1: ; %atomicrmw.start
 ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_lshlrev_b32_e32 v4, 16, v3
-; GFX90A-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
-; GFX90A-NEXT: v_max_f32_e32 v4, v4, v2
+; GFX90A-NEXT: v_mov_b32_e32 v4, v3
+; GFX90A-NEXT: v_lshlrev_b32_e32 v3, 16, v4
+; GFX90A-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX90A-NEXT: v_max_f32_e32 v3, v3, v2
 ; GFX90A-NEXT: v_max_f32_e32 v5, v5, v1
-; GFX90A-NEXT: v_bfe_u32 v6, v4, 16, 1
+; GFX90A-NEXT: v_bfe_u32 v6, v3, 16, 1
 ; GFX90A-NEXT: v_bfe_u32 v8, v5, 16, 1
-; GFX90A-NEXT: v_or_b32_e32 v7, 0x400000, v4
+; GFX90A-NEXT: v_or_b32_e32 v7, 0x400000, v3
 ; GFX90A-NEXT: v_or_b32_e32 v9, 0x400000, v5
-; GFX90A-NEXT: v_add3_u32 v6, v6, v4, s8
+; GFX90A-NEXT: v_add3_u32 v6, v6, v3, s8
 ; GFX90A-NEXT: v_add3_u32 v8, v8, v5, s8
 ; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v5, v5
-; GFX90A-NEXT: v_cmp_u_f32_e64 s[4:5], v4, v4
-; GFX90A-NEXT: v_cndmask_b32_e64 v4, v6, v7, s[4:5]
+; GFX90A-NEXT: v_cmp_u_f32_e64 s[4:5], v3, v3
+; GFX90A-NEXT: v_cndmask_b32_e64 v3, v6, v7, s[4:5]
 ; GFX90A-NEXT: v_cndmask_b32_e32 v5, v8, v9, vcc
-; GFX90A-NEXT: v_perm_b32 v4, v5, v4, s9
-; GFX90A-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4 offset:65532
+; GFX90A-NEXT: v_perm_b32 v3, v5, v3, s9
+; GFX90A-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3 offset:65532
 ; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
 ; GFX90A-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GFX90A-NEXT: v_mov_b32_e32 v3, v4
 ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[6:7]
 ; GFX90A-NEXT: s_cbranch_execnz .LBB27_1
 ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -8347,26 +8375,26 @@ define void @local_atomic_fmax_noret_v2bf16__ofset(ptr addrspace(3) %ptr, <2 x b
 ; GFX908-NEXT: .LBB27_1: ; %atomicrmw.start
 ; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_lshlrev_b32_e32 v4, 16, v3
-; GFX908-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
-; GFX908-NEXT: v_max_f32_e32 v4, v4, v2
+; GFX908-NEXT: v_mov_b32_e32 v4, v3
+; GFX908-NEXT: v_lshlrev_b32_e32 v3, 16, v4
+; GFX908-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX908-NEXT: v_max_f32_e32 v3, v3, v2
 ; GFX908-NEXT: v_max_f32_e32 v5, v5, v1
-; GFX908-NEXT: v_bfe_u32 v6, v4, 16, 1
+; GFX908-NEXT: v_bfe_u32 v6, v3, 16, 1
 ; GFX908-NEXT: v_bfe_u32 v8, v5, 16, 1
-; GFX908-NEXT: v_or_b32_e32 v7, 0x400000, v4
+; GFX908-NEXT: v_or_b32_e32 v7, 0x400000, v3
 ; GFX908-NEXT: v_or_b32_e32 v9, 0x400000, v5
-; GFX908-NEXT: v_add3_u32 v6, v6, v4, s8
+; GFX908-NEXT: v_add3_u32 v6, v6, v3, s8
 ; GFX908-NEXT: v_add3_u32 v8, v8, v5, s8
 ; GFX908-NEXT: v_cmp_u_f32_e32 vcc, v5, v5
-; GFX908-NEXT: v_cmp_u_f32_e64 s[4:5], v4, v4
-; GFX908-NEXT: v_cndmask_b32_e64 v4, v6, v7, s[4:5]
+; GFX908-NEXT: v_cmp_u_f32_e64 s[4:5], v3, v3
+; GFX908-NEXT: v_cndmask_b32_e64 v3, v6, v7, s[4:5]
 ; GFX908-NEXT: v_cndmask_b32_e32 v5, v8, v9, vcc
-; GFX908-NEXT: v_perm_b32 v4, v5, v4, s9
-; GFX908-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4 offset:65532
+; GFX908-NEXT: v_perm_b32 v3, v5, v3, s9
+; GFX908-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3 offset:65532
 ; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
 ; GFX908-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GFX908-NEXT: v_mov_b32_e32 v3, v4
 ; GFX908-NEXT: s_andn2_b64 exec, exec, s[6:7]
 ; GFX908-NEXT: s_cbranch_execnz .LBB27_1
 ; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -8384,29 +8412,29 @@ define void @local_atomic_fmax_noret_v2bf16__ofset(ptr addrspace(3) %ptr, <2 x b
 ; GFX8-NEXT: .LBB27_1: ; %atomicrmw.start
 ; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_lshlrev_b32_e32 v4, 16, v3
-; GFX8-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
-; GFX8-NEXT: v_max_f32_e32 v4, v4, v2
+; GFX8-NEXT: v_mov_b32_e32 v4, v3
+; GFX8-NEXT: v_lshlrev_b32_e32 v3, 16, v4
+; GFX8-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX8-NEXT: v_max_f32_e32 v3, v3, v2
 ; GFX8-NEXT: v_max_f32_e32 v5, v5, v1
-; GFX8-NEXT: v_bfe_u32 v6, v4, 16, 1
+; GFX8-NEXT: v_bfe_u32 v6, v3, 16, 1
 ; GFX8-NEXT: v_bfe_u32 v8, v5, 16, 1
-; GFX8-NEXT: v_add_u32_e32 v6, vcc, v6, v4
+; GFX8-NEXT: v_add_u32_e32 v6, vcc, v6, v3
 ; GFX8-NEXT: v_add_u32_e32 v8, vcc, v8, v5
 ; GFX8-NEXT: v_add_u32_e32 v6, vcc, 0x7fff, v6
 ; GFX8-NEXT: v_add_u32_e32 v8, vcc, 0x7fff, v8
 ; GFX8-NEXT: v_or_b32_e32 v9, 0x400000, v5
 ; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v5, v5
-; GFX8-NEXT: v_or_b32_e32 v7, 0x400000, v4
-; GFX8-NEXT: v_cmp_u_f32_e64 s[4:5], v4, v4
+; GFX8-NEXT: v_or_b32_e32 v7, 0x400000, v3
+; GFX8-NEXT: v_cmp_u_f32_e64 s[4:5], v3, v3
 ; GFX8-NEXT: v_cndmask_b32_e32 v5, v8, v9, vcc
-; GFX8-NEXT: v_cndmask_b32_e64 v4, v6, v7, s[4:5]
+; GFX8-NEXT: v_cndmask_b32_e64 v3, v6, v7, s[4:5]
 ; GFX8-NEXT: v_lshrrev_b32_e32 v5, 16, v5
-; GFX8-NEXT: v_alignbit_b32 v4, v5, v4, 16
-; GFX8-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4 offset:65532
+; GFX8-NEXT: v_alignbit_b32 v3, v5, v3, 16
+; GFX8-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3 offset:65532
 ; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
 ; GFX8-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GFX8-NEXT: v_mov_b32_e32 v3, v4
 ; GFX8-NEXT: s_andn2_b64 exec, exec, s[6:7]
 ; GFX8-NEXT: s_cbranch_execnz .LBB27_1
 ; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
diff --git a/llvm/test/CodeGen/AMDGPU/local-atomicrmw-fmin.ll b/llvm/test/CodeGen/AMDGPU/local-atomicrmw-fmin.ll
index 11ed43d737634..0aa8d33ea7429 100644
--- a/llvm/test/CodeGen/AMDGPU/local-atomicrmw-fmin.ll
+++ b/llvm/test/CodeGen/AMDGPU/local-atomicrmw-fmin.ll
@@ -1598,29 +1598,29 @@ define void @local_atomic_fmin_noret_f16(ptr addrspace(3) %ptr) nounwind {
 ; GFX12-TRUE16-NEXT: v_and_b32_e32 v1, -4, v0
 ; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v0, 3, v0
 ; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
-; GFX12-TRUE16-NEXT: ds_load_b32 v2, v1
-; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff
+; GFX12-TRUE16-NEXT: ds_load_b32 v3, v1
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v2, v0, 0xffff
 ; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, 24, v0
 ; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX12-TRUE16-NEXT: v_not_b32_e32 v3, v3
+;
GFX12-TRUE16-NEXT: v_not_b32_e32 v2, v2 ; GFX12-TRUE16-NEXT: .LBB10_1: ; %atomicrmw.start ; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0 -; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2) -; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v4, v0, v2 -; GFX12-TRUE16-NEXT: v_mov_b16_e32 v4.h, 0 -; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v4.l, v4.l, v4.l +; GFX12-TRUE16-NEXT: v_mov_b32_e32 v4, v3 +; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2) +; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v0, v4 +; GFX12-TRUE16-NEXT: v_mov_b16_e32 v3.h, 0 +; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v3.l, v3.l, v3.l ; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX12-TRUE16-NEXT: v_min_num_f16_e32 v4.l, 4.0, v4.l -; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v4, v0, v4 +; GFX12-TRUE16-NEXT: v_min_num_f16_e32 v3.l, 4.0, v3.l +; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v0, v3 ; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX12-TRUE16-NEXT: v_and_or_b32 v4, v2, v3, v4 +; GFX12-TRUE16-NEXT: v_and_or_b32 v3, v4, v2, v3 ; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0 -; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v1, v4, v2 +; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v1, v3, v4 ; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0 ; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SE -; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2 -; GFX12-TRUE16-NEXT: v_mov_b32_e32 v2, v4 +; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4 ; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe ; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0 ; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe @@ -1641,29 +1641,29 @@ define void @local_atomic_fmin_noret_f16(ptr addrspace(3) %ptr) nounwind { ; GFX12-FAKE16-NEXT: v_and_b32_e32 v1, -4, v0 ; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v0, 3, v0 ; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0 -; GFX12-FAKE16-NEXT: ds_load_b32 v2, v1 -; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff +; GFX12-FAKE16-NEXT: ds_load_b32 v3, v1 +; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v2, v0, 0xffff ; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, 24, v0 ; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) -; GFX12-FAKE16-NEXT: v_not_b32_e32 v3, v3 +; GFX12-FAKE16-NEXT: v_not_b32_e32 v2, v2 ; GFX12-FAKE16-NEXT: .LBB10_1: ; %atomicrmw.start ; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0 -; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v4, v0, v2 -; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v4, v4, v4 +; GFX12-FAKE16-NEXT: v_mov_b32_e32 v4, v3 +; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v0, v4 +; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v3, v3, v3 ; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX12-FAKE16-NEXT: v_min_num_f16_e32 v4, 4.0, v4 -; GFX12-FAKE16-NEXT: v_and_b32_e32 v4, 0xffff, v4 +; GFX12-FAKE16-NEXT: v_min_num_f16_e32 v3, 4.0, v3 +; GFX12-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3 ; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, v0, v4 -; GFX12-FAKE16-NEXT: v_and_or_b32 v4, v2, v3, v4 +; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v0, v3 +; GFX12-FAKE16-NEXT: v_and_or_b32 v3, v4, v2, v3 ; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0 -; GFX12-FAKE16-NEXT: 
ds_cmpstore_rtn_b32 v4, v1, v4, v2 +; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v3, v1, v3, v4 ; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0 ; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SE -; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2 -; GFX12-FAKE16-NEXT: v_mov_b32_e32 v2, v4 +; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4 ; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe ; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0 ; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe @@ -1688,16 +1688,16 @@ define void @local_atomic_fmin_noret_f16(ptr addrspace(3) %ptr) nounwind { ; GFX942-NEXT: .LBB10_1: ; %atomicrmw.start ; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX942-NEXT: s_waitcnt lgkmcnt(0) -; GFX942-NEXT: v_lshrrev_b32_e32 v4, v0, v3 -; GFX942-NEXT: v_max_f16_e32 v4, v4, v4 -; GFX942-NEXT: v_min_f16_e32 v4, 4.0, v4 -; GFX942-NEXT: v_lshlrev_b32_e32 v4, v0, v4 -; GFX942-NEXT: v_and_or_b32 v4, v3, v2, v4 -; GFX942-NEXT: ds_cmpst_rtn_b32 v4, v1, v3, v4 +; GFX942-NEXT: v_mov_b32_e32 v4, v3 +; GFX942-NEXT: v_lshrrev_b32_e32 v3, v0, v4 +; GFX942-NEXT: v_max_f16_e32 v3, v3, v3 +; GFX942-NEXT: v_min_f16_e32 v3, 4.0, v3 +; GFX942-NEXT: v_lshlrev_b32_e32 v3, v0, v3 +; GFX942-NEXT: v_and_or_b32 v3, v4, v2, v3 +; GFX942-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3 ; GFX942-NEXT: s_waitcnt lgkmcnt(0) -; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3 +; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4 ; GFX942-NEXT: s_or_b64 s[0:1], vcc, s[0:1] -; GFX942-NEXT: v_mov_b32_e32 v3, v4 ; GFX942-NEXT: s_andn2_b64 exec, exec, s[0:1] ; GFX942-NEXT: s_cbranch_execnz .LBB10_1 ; GFX942-NEXT: ; %bb.2: ; %atomicrmw.end @@ -1710,29 +1710,29 @@ define void @local_atomic_fmin_noret_f16(ptr addrspace(3) %ptr) nounwind { ; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, -4, v0 ; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v0, 3, v0 ; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0 -; GFX11-TRUE16-NEXT: ds_load_b32 v2, v1 -; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff +; GFX11-TRUE16-NEXT: ds_load_b32 v3, v1 +; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v2, v0, 0xffff ; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 24, v0 ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) -; GFX11-TRUE16-NEXT: v_not_b32_e32 v3, v3 +; GFX11-TRUE16-NEXT: v_not_b32_e32 v2, v2 ; GFX11-TRUE16-NEXT: .LBB10_1: ; %atomicrmw.start ; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0) -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2) -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v4, v0, v2 -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, 0 -; GFX11-TRUE16-NEXT: v_max_f16_e32 v4.l, v4.l, v4.l +; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v3 +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2) +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v0, v4 +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, 0 +; GFX11-TRUE16-NEXT: v_max_f16_e32 v3.l, v3.l, v3.l ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-TRUE16-NEXT: v_min_f16_e32 v4.l, 4.0, v4.l -; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v4, v0, v4 +; GFX11-TRUE16-NEXT: v_min_f16_e32 v3.l, 4.0, v3.l +; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v0, v3 ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX11-TRUE16-NEXT: v_and_or_b32 v4, v2, v3, v4 +; GFX11-TRUE16-NEXT: v_and_or_b32 v3, v4, v2, v3 ; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0 -; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v1, v4, v2 +; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v1, v3, v4 ; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0) ; GFX11-TRUE16-NEXT: 
buffer_gl0_inv -; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2 -; GFX11-TRUE16-NEXT: v_mov_b32_e32 v2, v4 +; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4 ; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0 ; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0 @@ -1747,29 +1747,29 @@ define void @local_atomic_fmin_noret_f16(ptr addrspace(3) %ptr) nounwind { ; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, -4, v0 ; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v0, 3, v0 ; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0 -; GFX11-FAKE16-NEXT: ds_load_b32 v2, v1 -; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff +; GFX11-FAKE16-NEXT: ds_load_b32 v3, v1 +; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v2, v0, 0xffff ; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 24, v0 ; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) -; GFX11-FAKE16-NEXT: v_not_b32_e32 v3, v3 +; GFX11-FAKE16-NEXT: v_not_b32_e32 v2, v2 ; GFX11-FAKE16-NEXT: .LBB10_1: ; %atomicrmw.start ; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0) -; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, v0, v2 -; GFX11-FAKE16-NEXT: v_max_f16_e32 v4, v4, v4 +; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v3 +; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v0, v4 +; GFX11-FAKE16-NEXT: v_max_f16_e32 v3, v3, v3 ; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-FAKE16-NEXT: v_min_f16_e32 v4, 4.0, v4 -; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 0xffff, v4 +; GFX11-FAKE16-NEXT: v_min_f16_e32 v3, 4.0, v3 +; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3 ; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, v0, v4 -; GFX11-FAKE16-NEXT: v_and_or_b32 v4, v2, v3, v4 +; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v0, v3 +; GFX11-FAKE16-NEXT: v_and_or_b32 v3, v4, v2, v3 ; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0 -; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v1, v4, v2 +; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v3, v1, v3, v4 ; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0) ; GFX11-FAKE16-NEXT: buffer_gl0_inv -; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2 -; GFX11-FAKE16-NEXT: v_mov_b32_e32 v2, v4 +; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4 ; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0 ; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0 @@ -1784,24 +1784,24 @@ define void @local_atomic_fmin_noret_f16(ptr addrspace(3) %ptr) nounwind { ; GFX10-NEXT: v_and_b32_e32 v1, -4, v0 ; GFX10-NEXT: v_lshlrev_b32_e32 v0, 3, v0 ; GFX10-NEXT: s_mov_b32 s4, 0 -; GFX10-NEXT: ds_read_b32 v2, v1 -; GFX10-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff +; GFX10-NEXT: ds_read_b32 v3, v1 +; GFX10-NEXT: v_lshlrev_b32_e64 v2, v0, 0xffff ; GFX10-NEXT: v_and_b32_e32 v0, 24, v0 -; GFX10-NEXT: v_not_b32_e32 v3, v3 +; GFX10-NEXT: v_not_b32_e32 v2, v2 ; GFX10-NEXT: .LBB10_1: ; %atomicrmw.start ; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX10-NEXT: s_waitcnt lgkmcnt(0) -; GFX10-NEXT: v_lshrrev_b32_e32 v4, v0, v2 -; GFX10-NEXT: v_max_f16_e32 v4, v4, v4 -; GFX10-NEXT: v_min_f16_e32 v4, 4.0, v4 -; GFX10-NEXT: v_lshlrev_b32_sdwa v4, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 -; GFX10-NEXT: v_and_or_b32 v4, v2, v3, v4 +; GFX10-NEXT: 
v_mov_b32_e32 v4, v3 +; GFX10-NEXT: v_lshrrev_b32_e32 v3, v0, v4 +; GFX10-NEXT: v_max_f16_e32 v3, v3, v3 +; GFX10-NEXT: v_min_f16_e32 v3, 4.0, v3 +; GFX10-NEXT: v_lshlrev_b32_sdwa v3, v0, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 +; GFX10-NEXT: v_and_or_b32 v3, v4, v2, v3 ; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 -; GFX10-NEXT: ds_cmpst_rtn_b32 v4, v1, v2, v4 +; GFX10-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3 ; GFX10-NEXT: s_waitcnt lgkmcnt(0) ; GFX10-NEXT: buffer_gl0_inv -; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2 -; GFX10-NEXT: v_mov_b32_e32 v2, v4 +; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4 ; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4 ; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4 ; GFX10-NEXT: s_cbranch_execnz .LBB10_1 @@ -1823,16 +1823,16 @@ define void @local_atomic_fmin_noret_f16(ptr addrspace(3) %ptr) nounwind { ; GFX90A-NEXT: .LBB10_1: ; %atomicrmw.start ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX90A-NEXT: s_waitcnt lgkmcnt(0) -; GFX90A-NEXT: v_lshrrev_b32_e32 v4, v0, v3 -; GFX90A-NEXT: v_max_f16_e32 v4, v4, v4 -; GFX90A-NEXT: v_min_f16_e32 v4, 4.0, v4 -; GFX90A-NEXT: v_lshlrev_b32_e32 v4, v0, v4 -; GFX90A-NEXT: v_and_or_b32 v4, v3, v2, v4 -; GFX90A-NEXT: ds_cmpst_rtn_b32 v4, v1, v3, v4 +; GFX90A-NEXT: v_mov_b32_e32 v4, v3 +; GFX90A-NEXT: v_lshrrev_b32_e32 v3, v0, v4 +; GFX90A-NEXT: v_max_f16_e32 v3, v3, v3 +; GFX90A-NEXT: v_min_f16_e32 v3, 4.0, v3 +; GFX90A-NEXT: v_lshlrev_b32_e32 v3, v0, v3 +; GFX90A-NEXT: v_and_or_b32 v3, v4, v2, v3 +; GFX90A-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3 ; GFX90A-NEXT: s_waitcnt lgkmcnt(0) -; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3 +; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4 ; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5] -; GFX90A-NEXT: v_mov_b32_e32 v3, v4 ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX90A-NEXT: s_cbranch_execnz .LBB10_1 ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end @@ -1853,16 +1853,16 @@ define void @local_atomic_fmin_noret_f16(ptr addrspace(3) %ptr) nounwind { ; GFX908-NEXT: .LBB10_1: ; %atomicrmw.start ; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX908-NEXT: s_waitcnt lgkmcnt(0) -; GFX908-NEXT: v_lshrrev_b32_e32 v4, v0, v3 -; GFX908-NEXT: v_max_f16_e32 v4, v4, v4 -; GFX908-NEXT: v_min_f16_e32 v4, 4.0, v4 -; GFX908-NEXT: v_lshlrev_b32_e32 v4, v0, v4 -; GFX908-NEXT: v_and_or_b32 v4, v3, v2, v4 -; GFX908-NEXT: ds_cmpst_rtn_b32 v4, v1, v3, v4 +; GFX908-NEXT: v_mov_b32_e32 v4, v3 +; GFX908-NEXT: v_lshrrev_b32_e32 v3, v0, v4 +; GFX908-NEXT: v_max_f16_e32 v3, v3, v3 +; GFX908-NEXT: v_min_f16_e32 v3, 4.0, v3 +; GFX908-NEXT: v_lshlrev_b32_e32 v3, v0, v3 +; GFX908-NEXT: v_and_or_b32 v3, v4, v2, v3 +; GFX908-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3 ; GFX908-NEXT: s_waitcnt lgkmcnt(0) -; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3 +; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4 ; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5] -; GFX908-NEXT: v_mov_b32_e32 v3, v4 ; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX908-NEXT: s_cbranch_execnz .LBB10_1 ; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end @@ -1884,17 +1884,17 @@ define void @local_atomic_fmin_noret_f16(ptr addrspace(3) %ptr) nounwind { ; GFX8-NEXT: .LBB10_1: ; %atomicrmw.start ; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX8-NEXT: s_waitcnt lgkmcnt(0) -; GFX8-NEXT: v_lshrrev_b32_e32 v4, v0, v3 -; GFX8-NEXT: v_max_f16_e32 v4, v4, v4 -; GFX8-NEXT: v_min_f16_e32 v4, 4.0, v4 -; GFX8-NEXT: v_and_b32_e32 v5, v3, v2 -; GFX8-NEXT: v_lshlrev_b32_e32 v4, v0, v4 -; GFX8-NEXT: v_or_b32_e32 v4, v5, v4 -; GFX8-NEXT: ds_cmpst_rtn_b32 v4, v1, v3, v4 +; GFX8-NEXT: 
v_mov_b32_e32 v4, v3 +; GFX8-NEXT: v_lshrrev_b32_e32 v3, v0, v4 +; GFX8-NEXT: v_max_f16_e32 v3, v3, v3 +; GFX8-NEXT: v_min_f16_e32 v3, 4.0, v3 +; GFX8-NEXT: v_and_b32_e32 v5, v4, v2 +; GFX8-NEXT: v_lshlrev_b32_e32 v3, v0, v3 +; GFX8-NEXT: v_or_b32_e32 v3, v5, v3 +; GFX8-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3 ; GFX8-NEXT: s_waitcnt lgkmcnt(0) -; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3 +; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4 ; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5] -; GFX8-NEXT: v_mov_b32_e32 v3, v4 ; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX8-NEXT: s_cbranch_execnz .LBB10_1 ; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end @@ -1915,18 +1915,18 @@ define void @local_atomic_fmin_noret_f16(ptr addrspace(3) %ptr) nounwind { ; GFX7-NEXT: .LBB10_1: ; %atomicrmw.start ; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX7-NEXT: s_waitcnt lgkmcnt(0) -; GFX7-NEXT: v_lshrrev_b32_e32 v4, v0, v3 -; GFX7-NEXT: v_cvt_f32_f16_e32 v4, v4 -; GFX7-NEXT: v_and_b32_e32 v5, v3, v2 -; GFX7-NEXT: v_min_f32_e32 v4, 4.0, v4 -; GFX7-NEXT: v_cvt_f16_f32_e32 v4, v4 -; GFX7-NEXT: v_lshlrev_b32_e32 v4, v0, v4 -; GFX7-NEXT: v_or_b32_e32 v4, v5, v4 -; GFX7-NEXT: ds_cmpst_rtn_b32 v4, v1, v3, v4 +; GFX7-NEXT: v_mov_b32_e32 v4, v3 +; GFX7-NEXT: v_lshrrev_b32_e32 v3, v0, v4 +; GFX7-NEXT: v_cvt_f32_f16_e32 v3, v3 +; GFX7-NEXT: v_and_b32_e32 v5, v4, v2 +; GFX7-NEXT: v_min_f32_e32 v3, 4.0, v3 +; GFX7-NEXT: v_cvt_f16_f32_e32 v3, v3 +; GFX7-NEXT: v_lshlrev_b32_e32 v3, v0, v3 +; GFX7-NEXT: v_or_b32_e32 v3, v5, v3 +; GFX7-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3 ; GFX7-NEXT: s_waitcnt lgkmcnt(0) -; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3 +; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4 ; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5] -; GFX7-NEXT: v_mov_b32_e32 v3, v4 ; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX7-NEXT: s_cbranch_execnz .LBB10_1 ; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end @@ -1947,18 +1947,18 @@ define void @local_atomic_fmin_noret_f16(ptr addrspace(3) %ptr) nounwind { ; GFX6-NEXT: .LBB10_1: ; %atomicrmw.start ; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX6-NEXT: s_waitcnt lgkmcnt(0) -; GFX6-NEXT: v_lshrrev_b32_e32 v4, v0, v3 -; GFX6-NEXT: v_cvt_f32_f16_e32 v4, v4 -; GFX6-NEXT: v_and_b32_e32 v5, v3, v2 -; GFX6-NEXT: v_min_f32_e32 v4, 4.0, v4 -; GFX6-NEXT: v_cvt_f16_f32_e32 v4, v4 -; GFX6-NEXT: v_lshlrev_b32_e32 v4, v0, v4 -; GFX6-NEXT: v_or_b32_e32 v4, v5, v4 -; GFX6-NEXT: ds_cmpst_rtn_b32 v4, v1, v3, v4 +; GFX6-NEXT: v_mov_b32_e32 v4, v3 +; GFX6-NEXT: v_lshrrev_b32_e32 v3, v0, v4 +; GFX6-NEXT: v_cvt_f32_f16_e32 v3, v3 +; GFX6-NEXT: v_and_b32_e32 v5, v4, v2 +; GFX6-NEXT: v_min_f32_e32 v3, 4.0, v3 +; GFX6-NEXT: v_cvt_f16_f32_e32 v3, v3 +; GFX6-NEXT: v_lshlrev_b32_e32 v3, v0, v3 +; GFX6-NEXT: v_or_b32_e32 v3, v5, v3 +; GFX6-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3 ; GFX6-NEXT: s_waitcnt lgkmcnt(0) -; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3 +; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4 ; GFX6-NEXT: s_or_b64 s[4:5], vcc, s[4:5] -; GFX6-NEXT: v_mov_b32_e32 v3, v4 ; GFX6-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX6-NEXT: s_cbranch_execnz .LBB10_1 ; GFX6-NEXT: ; %bb.2: ; %atomicrmw.end @@ -1989,20 +1989,21 @@ define void @local_atomic_fmin_noret_f16__offset(ptr addrspace(3) %ptr) nounwind ; GFX12-TRUE16-NEXT: .LBB11_1: ; %atomicrmw.start ; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0 -; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v4, v1, v3 -; GFX12-TRUE16-NEXT: v_mov_b16_e32 v4.h, 0 -; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX12-TRUE16-NEXT: 
v_max_num_f16_e32 v4.l, v4.l, v4.l -; GFX12-TRUE16-NEXT: v_min_num_f16_e32 v4.l, 4.0, v4.l +; GFX12-TRUE16-NEXT: v_mov_b32_e32 v4, v3 +; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2) +; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v1, v4 +; GFX12-TRUE16-NEXT: v_mov_b16_e32 v3.h, 0 +; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v3.l, v3.l, v3.l ; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v4, v1, v4 -; GFX12-TRUE16-NEXT: v_and_or_b32 v4, v3, v2, v4 +; GFX12-TRUE16-NEXT: v_min_num_f16_e32 v3.l, 4.0, v3.l +; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v1, v3 +; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX12-TRUE16-NEXT: v_and_or_b32 v3, v4, v2, v3 ; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0 -; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3 +; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4 ; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0 ; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SE -; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3 -; GFX12-TRUE16-NEXT: v_mov_b32_e32 v3, v4 +; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4 ; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe ; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0 ; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe @@ -2033,21 +2034,21 @@ define void @local_atomic_fmin_noret_f16__offset(ptr addrspace(3) %ptr) nounwind ; GFX12-FAKE16-NEXT: .LBB11_1: ; %atomicrmw.start ; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0 -; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v4, v1, v3 +; GFX12-FAKE16-NEXT: v_mov_b32_e32 v4, v3 +; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v1, v4 +; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v3, v3, v3 ; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v4, v4, v4 -; GFX12-FAKE16-NEXT: v_min_num_f16_e32 v4, 4.0, v4 +; GFX12-FAKE16-NEXT: v_min_num_f16_e32 v3, 4.0, v3 +; GFX12-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3 ; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX12-FAKE16-NEXT: v_and_b32_e32 v4, 0xffff, v4 -; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, v1, v4 -; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX12-FAKE16-NEXT: v_and_or_b32 v4, v3, v2, v4 +; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v1, v3 +; GFX12-FAKE16-NEXT: v_and_or_b32 v3, v4, v2, v3 ; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0 -; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3 +; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4 ; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0 ; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SE -; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3 -; GFX12-FAKE16-NEXT: v_mov_b32_e32 v3, v4 +; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4 ; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe ; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0 ; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe @@ -2073,16 +2074,16 @@ define void @local_atomic_fmin_noret_f16__offset(ptr addrspace(3) %ptr) nounwind ; GFX942-NEXT: .LBB11_1: ; %atomicrmw.start ; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX942-NEXT: s_waitcnt lgkmcnt(0) -; GFX942-NEXT: v_lshrrev_b32_e32 v4, v1, v3 -; GFX942-NEXT: v_max_f16_e32 v4, v4, v4 -; GFX942-NEXT: v_min_f16_e32 v4, 4.0, v4 -; GFX942-NEXT: v_lshlrev_b32_e32 v4, v1, v4 -; GFX942-NEXT: v_and_or_b32 v4, v3, v2, v4 -; GFX942-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4 +; 
GFX942-NEXT: v_mov_b32_e32 v4, v3 +; GFX942-NEXT: v_lshrrev_b32_e32 v3, v1, v4 +; GFX942-NEXT: v_max_f16_e32 v3, v3, v3 +; GFX942-NEXT: v_min_f16_e32 v3, 4.0, v3 +; GFX942-NEXT: v_lshlrev_b32_e32 v3, v1, v3 +; GFX942-NEXT: v_and_or_b32 v3, v4, v2, v3 +; GFX942-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3 ; GFX942-NEXT: s_waitcnt lgkmcnt(0) -; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3 +; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4 ; GFX942-NEXT: s_or_b64 s[0:1], vcc, s[0:1] -; GFX942-NEXT: v_mov_b32_e32 v3, v4 ; GFX942-NEXT: s_andn2_b64 exec, exec, s[0:1] ; GFX942-NEXT: s_cbranch_execnz .LBB11_1 ; GFX942-NEXT: ; %bb.2: ; %atomicrmw.end @@ -2105,20 +2106,21 @@ define void @local_atomic_fmin_noret_f16__offset(ptr addrspace(3) %ptr) nounwind ; GFX11-TRUE16-NEXT: .LBB11_1: ; %atomicrmw.start ; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0) -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v4, v1, v3 -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, 0 -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-TRUE16-NEXT: v_max_f16_e32 v4.l, v4.l, v4.l -; GFX11-TRUE16-NEXT: v_min_f16_e32 v4.l, 4.0, v4.l +; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v3 +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2) +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v1, v4 +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, 0 +; GFX11-TRUE16-NEXT: v_max_f16_e32 v3.l, v3.l, v3.l ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v4, v1, v4 -; GFX11-TRUE16-NEXT: v_and_or_b32 v4, v3, v2, v4 +; GFX11-TRUE16-NEXT: v_min_f16_e32 v3.l, 4.0, v3.l +; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v1, v3 +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-TRUE16-NEXT: v_and_or_b32 v3, v4, v2, v3 ; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0 -; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3 +; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4 ; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0) ; GFX11-TRUE16-NEXT: buffer_gl0_inv -; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3 -; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v4 +; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4 ; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0 ; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0 @@ -2143,21 +2145,21 @@ define void @local_atomic_fmin_noret_f16__offset(ptr addrspace(3) %ptr) nounwind ; GFX11-FAKE16-NEXT: .LBB11_1: ; %atomicrmw.start ; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0) -; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, v1, v3 +; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v3 +; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v1, v4 +; GFX11-FAKE16-NEXT: v_max_f16_e32 v3, v3, v3 ; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-FAKE16-NEXT: v_max_f16_e32 v4, v4, v4 -; GFX11-FAKE16-NEXT: v_min_f16_e32 v4, 4.0, v4 +; GFX11-FAKE16-NEXT: v_min_f16_e32 v3, 4.0, v3 +; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3 ; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 0xffff, v4 -; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, v1, v4 -; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX11-FAKE16-NEXT: v_and_or_b32 v4, v3, v2, v4 +; 
GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v1, v3 +; GFX11-FAKE16-NEXT: v_and_or_b32 v3, v4, v2, v3 ; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0 -; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3 +; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4 ; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0) ; GFX11-FAKE16-NEXT: buffer_gl0_inv -; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3 -; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v4 +; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4 ; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0 ; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0 @@ -2180,17 +2182,17 @@ define void @local_atomic_fmin_noret_f16__offset(ptr addrspace(3) %ptr) nounwind ; GFX10-NEXT: .LBB11_1: ; %atomicrmw.start ; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX10-NEXT: s_waitcnt lgkmcnt(0) -; GFX10-NEXT: v_lshrrev_b32_e32 v4, v1, v3 -; GFX10-NEXT: v_max_f16_e32 v4, v4, v4 -; GFX10-NEXT: v_min_f16_e32 v4, 4.0, v4 -; GFX10-NEXT: v_lshlrev_b32_sdwa v4, v1, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 -; GFX10-NEXT: v_and_or_b32 v4, v3, v2, v4 +; GFX10-NEXT: v_mov_b32_e32 v4, v3 +; GFX10-NEXT: v_lshrrev_b32_e32 v3, v1, v4 +; GFX10-NEXT: v_max_f16_e32 v3, v3, v3 +; GFX10-NEXT: v_min_f16_e32 v3, 4.0, v3 +; GFX10-NEXT: v_lshlrev_b32_sdwa v3, v1, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 +; GFX10-NEXT: v_and_or_b32 v3, v4, v2, v3 ; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 -; GFX10-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4 +; GFX10-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3 ; GFX10-NEXT: s_waitcnt lgkmcnt(0) ; GFX10-NEXT: buffer_gl0_inv -; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3 -; GFX10-NEXT: v_mov_b32_e32 v3, v4 +; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4 ; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4 ; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4 ; GFX10-NEXT: s_cbranch_execnz .LBB11_1 @@ -2213,16 +2215,16 @@ define void @local_atomic_fmin_noret_f16__offset(ptr addrspace(3) %ptr) nounwind ; GFX90A-NEXT: .LBB11_1: ; %atomicrmw.start ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX90A-NEXT: s_waitcnt lgkmcnt(0) -; GFX90A-NEXT: v_lshrrev_b32_e32 v4, v1, v3 -; GFX90A-NEXT: v_max_f16_e32 v4, v4, v4 -; GFX90A-NEXT: v_min_f16_e32 v4, 4.0, v4 -; GFX90A-NEXT: v_lshlrev_b32_e32 v4, v1, v4 -; GFX90A-NEXT: v_and_or_b32 v4, v3, v2, v4 -; GFX90A-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4 +; GFX90A-NEXT: v_mov_b32_e32 v4, v3 +; GFX90A-NEXT: v_lshrrev_b32_e32 v3, v1, v4 +; GFX90A-NEXT: v_max_f16_e32 v3, v3, v3 +; GFX90A-NEXT: v_min_f16_e32 v3, 4.0, v3 +; GFX90A-NEXT: v_lshlrev_b32_e32 v3, v1, v3 +; GFX90A-NEXT: v_and_or_b32 v3, v4, v2, v3 +; GFX90A-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3 ; GFX90A-NEXT: s_waitcnt lgkmcnt(0) -; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3 +; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4 ; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5] -; GFX90A-NEXT: v_mov_b32_e32 v3, v4 ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX90A-NEXT: s_cbranch_execnz .LBB11_1 ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end @@ -2244,16 +2246,16 @@ define void @local_atomic_fmin_noret_f16__offset(ptr addrspace(3) %ptr) nounwind ; GFX908-NEXT: .LBB11_1: ; %atomicrmw.start ; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX908-NEXT: s_waitcnt lgkmcnt(0) -; GFX908-NEXT: v_lshrrev_b32_e32 v4, v1, v3 -; GFX908-NEXT: v_max_f16_e32 v4, v4, v4 -; GFX908-NEXT: v_min_f16_e32 v4, 4.0, v4 -; GFX908-NEXT: v_lshlrev_b32_e32 v4, v1, v4 -; GFX908-NEXT: v_and_or_b32 v4, v3, v2, v4 -; GFX908-NEXT: ds_cmpst_rtn_b32 v4, 
v0, v3, v4 +; GFX908-NEXT: v_mov_b32_e32 v4, v3 +; GFX908-NEXT: v_lshrrev_b32_e32 v3, v1, v4 +; GFX908-NEXT: v_max_f16_e32 v3, v3, v3 +; GFX908-NEXT: v_min_f16_e32 v3, 4.0, v3 +; GFX908-NEXT: v_lshlrev_b32_e32 v3, v1, v3 +; GFX908-NEXT: v_and_or_b32 v3, v4, v2, v3 +; GFX908-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3 ; GFX908-NEXT: s_waitcnt lgkmcnt(0) -; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3 +; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4 ; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5] -; GFX908-NEXT: v_mov_b32_e32 v3, v4 ; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX908-NEXT: s_cbranch_execnz .LBB11_1 ; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end @@ -2276,17 +2278,17 @@ define void @local_atomic_fmin_noret_f16__offset(ptr addrspace(3) %ptr) nounwind ; GFX8-NEXT: .LBB11_1: ; %atomicrmw.start ; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX8-NEXT: s_waitcnt lgkmcnt(0) -; GFX8-NEXT: v_lshrrev_b32_e32 v4, v1, v3 -; GFX8-NEXT: v_max_f16_e32 v4, v4, v4 -; GFX8-NEXT: v_min_f16_e32 v4, 4.0, v4 -; GFX8-NEXT: v_and_b32_e32 v5, v3, v2 -; GFX8-NEXT: v_lshlrev_b32_e32 v4, v1, v4 -; GFX8-NEXT: v_or_b32_e32 v4, v5, v4 -; GFX8-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4 +; GFX8-NEXT: v_mov_b32_e32 v4, v3 +; GFX8-NEXT: v_lshrrev_b32_e32 v3, v1, v4 +; GFX8-NEXT: v_max_f16_e32 v3, v3, v3 +; GFX8-NEXT: v_min_f16_e32 v3, 4.0, v3 +; GFX8-NEXT: v_and_b32_e32 v5, v4, v2 +; GFX8-NEXT: v_lshlrev_b32_e32 v3, v1, v3 +; GFX8-NEXT: v_or_b32_e32 v3, v5, v3 +; GFX8-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3 ; GFX8-NEXT: s_waitcnt lgkmcnt(0) -; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3 +; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4 ; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5] -; GFX8-NEXT: v_mov_b32_e32 v3, v4 ; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX8-NEXT: s_cbranch_execnz .LBB11_1 ; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end @@ -2308,18 +2310,18 @@ define void @local_atomic_fmin_noret_f16__offset(ptr addrspace(3) %ptr) nounwind ; GFX7-NEXT: .LBB11_1: ; %atomicrmw.start ; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX7-NEXT: s_waitcnt lgkmcnt(0) -; GFX7-NEXT: v_lshrrev_b32_e32 v4, v1, v3 -; GFX7-NEXT: v_cvt_f32_f16_e32 v4, v4 -; GFX7-NEXT: v_and_b32_e32 v5, v3, v2 -; GFX7-NEXT: v_min_f32_e32 v4, 4.0, v4 -; GFX7-NEXT: v_cvt_f16_f32_e32 v4, v4 -; GFX7-NEXT: v_lshlrev_b32_e32 v4, v1, v4 -; GFX7-NEXT: v_or_b32_e32 v4, v5, v4 -; GFX7-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4 +; GFX7-NEXT: v_mov_b32_e32 v4, v3 +; GFX7-NEXT: v_lshrrev_b32_e32 v3, v1, v4 +; GFX7-NEXT: v_cvt_f32_f16_e32 v3, v3 +; GFX7-NEXT: v_and_b32_e32 v5, v4, v2 +; GFX7-NEXT: v_min_f32_e32 v3, 4.0, v3 +; GFX7-NEXT: v_cvt_f16_f32_e32 v3, v3 +; GFX7-NEXT: v_lshlrev_b32_e32 v3, v1, v3 +; GFX7-NEXT: v_or_b32_e32 v3, v5, v3 +; GFX7-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3 ; GFX7-NEXT: s_waitcnt lgkmcnt(0) -; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3 +; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4 ; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5] -; GFX7-NEXT: v_mov_b32_e32 v3, v4 ; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX7-NEXT: s_cbranch_execnz .LBB11_1 ; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end @@ -2341,18 +2343,18 @@ define void @local_atomic_fmin_noret_f16__offset(ptr addrspace(3) %ptr) nounwind ; GFX6-NEXT: .LBB11_1: ; %atomicrmw.start ; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX6-NEXT: s_waitcnt lgkmcnt(0) -; GFX6-NEXT: v_lshrrev_b32_e32 v4, v1, v3 -; GFX6-NEXT: v_cvt_f32_f16_e32 v4, v4 -; GFX6-NEXT: v_and_b32_e32 v5, v3, v2 -; GFX6-NEXT: v_min_f32_e32 v4, 4.0, v4 -; GFX6-NEXT: v_cvt_f16_f32_e32 v4, v4 -; GFX6-NEXT: v_lshlrev_b32_e32 v4, v1, v4 -; GFX6-NEXT: v_or_b32_e32 v4, v5, 
v4 -; GFX6-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4 +; GFX6-NEXT: v_mov_b32_e32 v4, v3 +; GFX6-NEXT: v_lshrrev_b32_e32 v3, v1, v4 +; GFX6-NEXT: v_cvt_f32_f16_e32 v3, v3 +; GFX6-NEXT: v_and_b32_e32 v5, v4, v2 +; GFX6-NEXT: v_min_f32_e32 v3, 4.0, v3 +; GFX6-NEXT: v_cvt_f16_f32_e32 v3, v3 +; GFX6-NEXT: v_lshlrev_b32_e32 v3, v1, v3 +; GFX6-NEXT: v_or_b32_e32 v3, v5, v3 +; GFX6-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3 ; GFX6-NEXT: s_waitcnt lgkmcnt(0) -; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3 +; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4 ; GFX6-NEXT: s_or_b64 s[4:5], vcc, s[4:5] -; GFX6-NEXT: v_mov_b32_e32 v3, v4 ; GFX6-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX6-NEXT: s_cbranch_execnz .LBB11_1 ; GFX6-NEXT: ; %bb.2: ; %atomicrmw.end @@ -2685,17 +2687,18 @@ define void @local_atomic_fmin_noret_f16__offset__align4(ptr addrspace(3) %ptr) ; GFX12-TRUE16-NEXT: .LBB13_1: ; %atomicrmw.start ; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0 -; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v2.l, v1.l, v1.l -; GFX12-TRUE16-NEXT: v_mov_b16_e32 v2.h, 0 +; GFX12-TRUE16-NEXT: v_mov_b32_e32 v2, v1 +; GFX12-TRUE16-NEXT: v_mov_b16_e32 v1.h, 0 ; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX12-TRUE16-NEXT: v_min_num_f16_e32 v2.l, 4.0, v2.l -; GFX12-TRUE16-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v2 +; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v1.l, v2.l, v2.l +; GFX12-TRUE16-NEXT: v_min_num_f16_e32 v1.l, 4.0, v1.l +; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX12-TRUE16-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v1 ; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0 -; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v1 offset:65534 +; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:65534 ; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0 ; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SE -; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1 -; GFX12-TRUE16-NEXT: v_mov_b32_e32 v1, v2 +; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2 ; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe ; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0 ; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe @@ -2718,18 +2721,18 @@ define void @local_atomic_fmin_noret_f16__offset__align4(ptr addrspace(3) %ptr) ; GFX12-FAKE16-NEXT: .LBB13_1: ; %atomicrmw.start ; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0 -; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v2, v1, v1 +; GFX12-FAKE16-NEXT: v_mov_b32_e32 v2, v1 ; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX12-FAKE16-NEXT: v_min_num_f16_e32 v2, 4.0, v2 -; GFX12-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2 -; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX12-FAKE16-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v2 +; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v1, v2, v2 +; GFX12-FAKE16-NEXT: v_min_num_f16_e32 v1, 4.0, v1 +; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX12-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; GFX12-FAKE16-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v1 ; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0 -; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v1 offset:65534 +; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:65534 ; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0 ; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SE -; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1 -; GFX12-FAKE16-NEXT: v_mov_b32_e32 v1, v2 +; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2 ; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe ; 
GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0 ; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe @@ -2749,14 +2752,14 @@ define void @local_atomic_fmin_noret_f16__offset__align4(ptr addrspace(3) %ptr) ; GFX942-NEXT: .LBB13_1: ; %atomicrmw.start ; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX942-NEXT: s_waitcnt lgkmcnt(0) -; GFX942-NEXT: v_max_f16_e32 v2, v1, v1 -; GFX942-NEXT: v_min_f16_e32 v2, 4.0, v2 -; GFX942-NEXT: v_and_or_b32 v2, v1, s2, v2 -; GFX942-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 offset:65534 +; GFX942-NEXT: v_mov_b32_e32 v2, v1 +; GFX942-NEXT: v_max_f16_e32 v1, v2, v2 +; GFX942-NEXT: v_min_f16_e32 v1, 4.0, v1 +; GFX942-NEXT: v_and_or_b32 v1, v2, s2, v1 +; GFX942-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 offset:65534 ; GFX942-NEXT: s_waitcnt lgkmcnt(0) -; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1 +; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2 ; GFX942-NEXT: s_or_b64 s[0:1], vcc, s[0:1] -; GFX942-NEXT: v_mov_b32_e32 v1, v2 ; GFX942-NEXT: s_andn2_b64 exec, exec, s[0:1] ; GFX942-NEXT: s_cbranch_execnz .LBB13_1 ; GFX942-NEXT: ; %bb.2: ; %atomicrmw.end @@ -2771,17 +2774,18 @@ define void @local_atomic_fmin_noret_f16__offset__align4(ptr addrspace(3) %ptr) ; GFX11-TRUE16-NEXT: .LBB13_1: ; %atomicrmw.start ; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0) -; GFX11-TRUE16-NEXT: v_max_f16_e32 v2.l, v1.l, v1.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, 0 +; GFX11-TRUE16-NEXT: v_mov_b32_e32 v2, v1 +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, 0 ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-TRUE16-NEXT: v_min_f16_e32 v2.l, 4.0, v2.l -; GFX11-TRUE16-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v2 +; GFX11-TRUE16-NEXT: v_max_f16_e32 v1.l, v2.l, v2.l +; GFX11-TRUE16-NEXT: v_min_f16_e32 v1.l, 4.0, v1.l +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-TRUE16-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v1 ; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0 -; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v1 offset:65534 +; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:65534 ; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0) ; GFX11-TRUE16-NEXT: buffer_gl0_inv -; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1 -; GFX11-TRUE16-NEXT: v_mov_b32_e32 v1, v2 +; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2 ; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0 ; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0 @@ -2798,18 +2802,18 @@ define void @local_atomic_fmin_noret_f16__offset__align4(ptr addrspace(3) %ptr) ; GFX11-FAKE16-NEXT: .LBB13_1: ; %atomicrmw.start ; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0) -; GFX11-FAKE16-NEXT: v_max_f16_e32 v2, v1, v1 +; GFX11-FAKE16-NEXT: v_mov_b32_e32 v2, v1 ; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-FAKE16-NEXT: v_min_f16_e32 v2, 4.0, v2 -; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2 -; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX11-FAKE16-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v2 -; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0 -; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v1 offset:65534 -; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0) -; GFX11-FAKE16-NEXT: buffer_gl0_inv -; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1 -; GFX11-FAKE16-NEXT: v_mov_b32_e32 v1, v2 +; GFX11-FAKE16-NEXT: v_max_f16_e32 v1, v2, v2 +; GFX11-FAKE16-NEXT: v_min_f16_e32 v1, 4.0, v1 +; GFX11-FAKE16-NEXT: 
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; GFX11-FAKE16-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v1 +; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0 +; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:65534 +; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-FAKE16-NEXT: buffer_gl0_inv +; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2 ; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0 ; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0 @@ -2826,16 +2830,16 @@ define void @local_atomic_fmin_noret_f16__offset__align4(ptr addrspace(3) %ptr) ; GFX10-NEXT: .LBB13_1: ; %atomicrmw.start ; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX10-NEXT: s_waitcnt lgkmcnt(0) -; GFX10-NEXT: v_max_f16_e32 v2, v1, v1 -; GFX10-NEXT: v_min_f16_e32 v2, 4.0, v2 -; GFX10-NEXT: v_and_b32_e32 v2, 0xffff, v2 -; GFX10-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v2 +; GFX10-NEXT: v_mov_b32_e32 v2, v1 +; GFX10-NEXT: v_max_f16_e32 v1, v2, v2 +; GFX10-NEXT: v_min_f16_e32 v1, 4.0, v1 +; GFX10-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; GFX10-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v1 ; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 -; GFX10-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 offset:65534 +; GFX10-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 offset:65534 ; GFX10-NEXT: s_waitcnt lgkmcnt(0) ; GFX10-NEXT: buffer_gl0_inv -; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1 -; GFX10-NEXT: v_mov_b32_e32 v1, v2 +; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2 ; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4 ; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4 ; GFX10-NEXT: s_cbranch_execnz .LBB13_1 @@ -2852,14 +2856,14 @@ define void @local_atomic_fmin_noret_f16__offset__align4(ptr addrspace(3) %ptr) ; GFX90A-NEXT: .LBB13_1: ; %atomicrmw.start ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX90A-NEXT: s_waitcnt lgkmcnt(0) -; GFX90A-NEXT: v_max_f16_e32 v2, v1, v1 -; GFX90A-NEXT: v_min_f16_e32 v2, 4.0, v2 -; GFX90A-NEXT: v_and_or_b32 v2, v1, s6, v2 -; GFX90A-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 offset:65534 +; GFX90A-NEXT: v_mov_b32_e32 v2, v1 +; GFX90A-NEXT: v_max_f16_e32 v1, v2, v2 +; GFX90A-NEXT: v_min_f16_e32 v1, 4.0, v1 +; GFX90A-NEXT: v_and_or_b32 v1, v2, s6, v1 +; GFX90A-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 offset:65534 ; GFX90A-NEXT: s_waitcnt lgkmcnt(0) -; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1 +; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2 ; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5] -; GFX90A-NEXT: v_mov_b32_e32 v1, v2 ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX90A-NEXT: s_cbranch_execnz .LBB13_1 ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end @@ -2875,14 +2879,14 @@ define void @local_atomic_fmin_noret_f16__offset__align4(ptr addrspace(3) %ptr) ; GFX908-NEXT: .LBB13_1: ; %atomicrmw.start ; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX908-NEXT: s_waitcnt lgkmcnt(0) -; GFX908-NEXT: v_max_f16_e32 v2, v1, v1 -; GFX908-NEXT: v_min_f16_e32 v2, 4.0, v2 -; GFX908-NEXT: v_and_or_b32 v2, v1, s6, v2 -; GFX908-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 offset:65534 +; GFX908-NEXT: v_mov_b32_e32 v2, v1 +; GFX908-NEXT: v_max_f16_e32 v1, v2, v2 +; GFX908-NEXT: v_min_f16_e32 v1, 4.0, v1 +; GFX908-NEXT: v_and_or_b32 v1, v2, s6, v1 +; GFX908-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 offset:65534 ; GFX908-NEXT: s_waitcnt lgkmcnt(0) -; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1 +; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2 ; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5] -; GFX908-NEXT: v_mov_b32_e32 v1, v2 ; GFX908-NEXT: s_andn2_b64 
exec, exec, s[4:5] ; GFX908-NEXT: s_cbranch_execnz .LBB13_1 ; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end @@ -2898,15 +2902,15 @@ define void @local_atomic_fmin_noret_f16__offset__align4(ptr addrspace(3) %ptr) ; GFX8-NEXT: .LBB13_1: ; %atomicrmw.start ; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX8-NEXT: s_waitcnt lgkmcnt(0) -; GFX8-NEXT: v_max_f16_e32 v2, v1, v1 -; GFX8-NEXT: v_and_b32_e32 v3, 0xffff0000, v1 -; GFX8-NEXT: v_min_f16_e32 v2, 4.0, v2 -; GFX8-NEXT: v_or_b32_e32 v2, v3, v2 -; GFX8-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 offset:65534 +; GFX8-NEXT: v_mov_b32_e32 v2, v1 +; GFX8-NEXT: v_max_f16_e32 v1, v2, v2 +; GFX8-NEXT: v_and_b32_e32 v3, 0xffff0000, v2 +; GFX8-NEXT: v_min_f16_e32 v1, 4.0, v1 +; GFX8-NEXT: v_or_b32_e32 v1, v3, v1 +; GFX8-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 offset:65534 ; GFX8-NEXT: s_waitcnt lgkmcnt(0) -; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1 +; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2 ; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5] -; GFX8-NEXT: v_mov_b32_e32 v1, v2 ; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX8-NEXT: s_cbranch_execnz .LBB13_1 ; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end @@ -2922,16 +2926,16 @@ define void @local_atomic_fmin_noret_f16__offset__align4(ptr addrspace(3) %ptr) ; GFX7-NEXT: .LBB13_1: ; %atomicrmw.start ; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX7-NEXT: s_waitcnt lgkmcnt(0) -; GFX7-NEXT: v_cvt_f32_f16_e32 v2, v1 -; GFX7-NEXT: v_and_b32_e32 v3, 0xffff0000, v1 -; GFX7-NEXT: v_min_f32_e32 v2, 4.0, v2 -; GFX7-NEXT: v_cvt_f16_f32_e32 v2, v2 -; GFX7-NEXT: v_or_b32_e32 v2, v3, v2 -; GFX7-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 offset:65534 +; GFX7-NEXT: v_mov_b32_e32 v2, v1 +; GFX7-NEXT: v_cvt_f32_f16_e32 v1, v2 +; GFX7-NEXT: v_and_b32_e32 v3, 0xffff0000, v2 +; GFX7-NEXT: v_min_f32_e32 v1, 4.0, v1 +; GFX7-NEXT: v_cvt_f16_f32_e32 v1, v1 +; GFX7-NEXT: v_or_b32_e32 v1, v3, v1 +; GFX7-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 offset:65534 ; GFX7-NEXT: s_waitcnt lgkmcnt(0) -; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1 +; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2 ; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5] -; GFX7-NEXT: v_mov_b32_e32 v1, v2 ; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX7-NEXT: s_cbranch_execnz .LBB13_1 ; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end @@ -2948,16 +2952,16 @@ define void @local_atomic_fmin_noret_f16__offset__align4(ptr addrspace(3) %ptr) ; GFX6-NEXT: .LBB13_1: ; %atomicrmw.start ; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX6-NEXT: s_waitcnt lgkmcnt(0) -; GFX6-NEXT: v_cvt_f32_f16_e32 v2, v1 -; GFX6-NEXT: v_and_b32_e32 v3, 0xffff0000, v1 -; GFX6-NEXT: v_min_f32_e32 v2, 4.0, v2 -; GFX6-NEXT: v_cvt_f16_f32_e32 v2, v2 -; GFX6-NEXT: v_or_b32_e32 v2, v3, v2 -; GFX6-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 +; GFX6-NEXT: v_mov_b32_e32 v2, v1 +; GFX6-NEXT: v_cvt_f32_f16_e32 v1, v2 +; GFX6-NEXT: v_and_b32_e32 v3, 0xffff0000, v2 +; GFX6-NEXT: v_min_f32_e32 v1, 4.0, v1 +; GFX6-NEXT: v_cvt_f16_f32_e32 v1, v1 +; GFX6-NEXT: v_or_b32_e32 v1, v3, v1 +; GFX6-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 ; GFX6-NEXT: s_waitcnt lgkmcnt(0) -; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1 +; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2 ; GFX6-NEXT: s_or_b64 s[4:5], vcc, s[4:5] -; GFX6-NEXT: v_mov_b32_e32 v1, v2 ; GFX6-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX6-NEXT: s_cbranch_execnz .LBB13_1 ; GFX6-NEXT: ; %bb.2: ; %atomicrmw.end @@ -3911,38 +3915,38 @@ define void @local_atomic_fmin_noret_bf16(ptr addrspace(3) %ptr) nounwind { ; GFX12-TRUE16-NEXT: v_and_b32_e32 v1, -4, v0 ; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v0, 3, v0 ; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0 -; 
GFX12-TRUE16-NEXT: ds_load_b32 v2, v1
-; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff
+; GFX12-TRUE16-NEXT: ds_load_b32 v3, v1
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v2, v0, 0xffff
 ; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, 24, v0
 ; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX12-TRUE16-NEXT: v_not_b32_e32 v3, v3
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v2, v2
 ; GFX12-TRUE16-NEXT: .LBB16_1: ; %atomicrmw.start
 ; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
-; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v4, v0, v2
-; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 16, v4
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3
 ; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-TRUE16-NEXT: v_min_num_f32_e32 v4, 4.0, v4
-; GFX12-TRUE16-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX12-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX12-TRUE16-NEXT: v_min_num_f32_e32 v3, 4.0, v3
+; GFX12-TRUE16-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
 ; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX12-TRUE16-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
+; GFX12-TRUE16-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
 ; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
-; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
 ; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.h, 0
 ; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.l, v4.h
-; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v4, v0, v5
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.l, v3.h
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v0, v5
 ; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-TRUE16-NEXT: v_and_or_b32 v4, v2, v3, v4
+; GFX12-TRUE16-NEXT: v_and_or_b32 v3, v4, v2, v3
 ; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
-; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v1, v4, v2
+; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v1, v3, v4
 ; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
 ; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SE
-; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2
-; GFX12-TRUE16-NEXT: v_mov_b32_e32 v2, v4
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
 ; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
 ; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
 ; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
@@ -3963,37 +3967,37 @@ define void @local_atomic_fmin_noret_bf16(ptr addrspace(3) %ptr) nounwind {
 ; GFX12-FAKE16-NEXT: v_and_b32_e32 v1, -4, v0
 ; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v0, 3, v0
 ; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
-; GFX12-FAKE16-NEXT: ds_load_b32 v2, v1
-; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff
+; GFX12-FAKE16-NEXT: ds_load_b32 v3, v1
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v2, v0, 0xffff
 ; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, 24, v0
 ; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX12-FAKE16-NEXT: v_not_b32_e32 v3, v3
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v2, v2
 ; GFX12-FAKE16-NEXT: .LBB16_1: ; %atomicrmw.start
 ; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
-; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v4, v0, v2
-; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v4
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v4, v3
 ; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_min_num_f32_e32 v4, 4.0, v4
-; GFX12-FAKE16-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX12-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_min_num_f32_e32 v3, 4.0, v3
+; GFX12-FAKE16-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
 ; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
+; GFX12-FAKE16-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
 ; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
 ; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v4
-; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, v0, v4
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v0, v3
 ; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_and_or_b32 v4, v2, v3, v4
+; GFX12-FAKE16-NEXT: v_and_or_b32 v3, v4, v2, v3
 ; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
-; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v1, v4, v2
+; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v3, v1, v3, v4
 ; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
 ; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SE
-; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2
-; GFX12-FAKE16-NEXT: v_mov_b32_e32 v2, v4
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
 ; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
 ; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
 ; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
@@ -4019,22 +4023,22 @@ define void @local_atomic_fmin_noret_bf16(ptr addrspace(3) %ptr) nounwind {
 ; GFX942-NEXT: .LBB16_1: ; %atomicrmw.start
 ; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_lshrrev_b32_sdwa v4, v0, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX942-NEXT: v_mov_b32_e32 v4, v3
+; GFX942-NEXT: v_lshrrev_b32_sdwa v3, v0, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
 ; GFX942-NEXT: s_nop 0
-; GFX942-NEXT: v_min_f32_e32 v4, 4.0, v4
-; GFX942-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX942-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX942-NEXT: v_add3_u32 v5, v5, v4, s2
-; GFX942-NEXT: v_cmp_u_f32_e32 vcc, v4, v4
+; GFX942-NEXT: v_min_f32_e32 v3, 4.0, v3
+; GFX942-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX942-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX942-NEXT: v_add3_u32 v5, v5, v3, s2
+; GFX942-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
 ; GFX942-NEXT: s_nop 1
-; GFX942-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc
-; GFX942-NEXT: v_lshlrev_b32_sdwa v4, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
-; GFX942-NEXT: v_and_or_b32 v4, v3, v2, v4
-; GFX942-NEXT: ds_cmpst_rtn_b32 v4, v1, v3, v4
+; GFX942-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc
+; GFX942-NEXT: v_lshlrev_b32_sdwa v3, v0, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX942-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX942-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3
 ; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
 ; GFX942-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
-; GFX942-NEXT: v_mov_b32_e32 v3, v4
 ; GFX942-NEXT: s_andn2_b64 exec, exec, s[0:1]
 ; GFX942-NEXT: s_cbranch_execnz .LBB16_1
 ; GFX942-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -4047,38 +4051,38 @@ define void @local_atomic_fmin_noret_bf16(ptr addrspace(3) %ptr) nounwind {
 ; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, -4, v0
 ; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v0, 3, v0
 ; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
-; GFX11-TRUE16-NEXT: ds_load_b32 v2, v1
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff
+; GFX11-TRUE16-NEXT: ds_load_b32 v3, v1
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v2, v0, 0xffff
 ; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 24, v0
 ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_not_b32_e32 v3, v3
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v2, v2
 ; GFX11-TRUE16-NEXT: .p2align 6
 ; GFX11-TRUE16-NEXT: .LBB16_1: ; %atomicrmw.start
 ; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v4, v0, v2
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 16, v4
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v3
 ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_min_f32_e32 v4, 4.0, v4
-; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_min_f32_e32 v3, 4.0, v3
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
 ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo
+; GFX11-TRUE16-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
 ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, 0
 ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v4.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v4, v0, v5
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v3.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v0, v5
 ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_and_or_b32 v4, v2, v3, v4
+; GFX11-TRUE16-NEXT: v_and_or_b32 v3, v4, v2, v3
 ; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v1, v4, v2
+; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v1, v3, v4
 ; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX11-TRUE16-NEXT: buffer_gl0_inv
-; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v2, v4
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
 ; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
 ; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
 ; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -4093,37 +4097,37 @@ define void @local_atomic_fmin_noret_bf16(ptr addrspace(3) %ptr) nounwind {
 ; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, -4, v0
 ; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v0, 3, v0
 ; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
-; GFX11-FAKE16-NEXT: ds_load_b32 v2, v1
-; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff
+; GFX11-FAKE16-NEXT: ds_load_b32 v3, v1
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v2, v0, 0xffff
 ; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 24, v0
 ; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX11-FAKE16-NEXT: v_not_b32_e32 v3, v3
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v2, v2
 ; GFX11-FAKE16-NEXT: .p2align 6
 ; GFX11-FAKE16-NEXT: .LBB16_1: ; %atomicrmw.start
 ; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, v0, v2
-; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v4
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v3
 ; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_min_f32_e32 v4, 4.0, v4
-; GFX11-FAKE16-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_min_f32_e32 v3, 4.0, v3
+; GFX11-FAKE16-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
 ; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
-; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo
+; GFX11-FAKE16-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
 ; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v4
-; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, v0, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v0, v3
 ; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_and_or_b32 v4, v2, v3, v4
+; GFX11-FAKE16-NEXT: v_and_or_b32 v3, v4, v2, v3
 ; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v1, v4, v2
+; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v3, v1, v3, v4
 ; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX11-FAKE16-NEXT: buffer_gl0_inv
-; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2
-; GFX11-FAKE16-NEXT: v_mov_b32_e32 v2, v4
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
 ; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
 ; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
 ; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -4138,28 +4142,28 @@ define void @local_atomic_fmin_noret_bf16(ptr addrspace(3) %ptr) nounwind {
 ; GFX10-NEXT: v_and_b32_e32 v1, -4, v0
 ; GFX10-NEXT: v_lshlrev_b32_e32 v0, 3, v0
 ; GFX10-NEXT: s_mov_b32 s4, 0
-; GFX10-NEXT: ds_read_b32 v2, v1
-; GFX10-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff
+; GFX10-NEXT: ds_read_b32 v3, v1
+; GFX10-NEXT: v_lshlrev_b32_e64 v2, v0, 0xffff
 ; GFX10-NEXT: v_and_b32_e32 v0, 24, v0
-; GFX10-NEXT: v_not_b32_e32 v3, v3
+; GFX10-NEXT: v_not_b32_e32 v2, v2
 ; GFX10-NEXT: .LBB16_1: ; %atomicrmw.start
 ; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX10-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-NEXT: v_lshrrev_b32_sdwa v4, v0, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; GFX10-NEXT: v_min_f32_e32 v4, 4.0, v4
-; GFX10-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX10-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX10-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
-; GFX10-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo
-; GFX10-NEXT: v_lshlrev_b32_sdwa v4, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
-; GFX10-NEXT: v_and_or_b32 v4, v2, v3, v4
+; GFX10-NEXT: v_mov_b32_e32 v4, v3
+; GFX10-NEXT: v_lshrrev_b32_sdwa v3, v0, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX10-NEXT: v_min_f32_e32 v3, 4.0, v3
+; GFX10-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX10-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX10-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
+; GFX10-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
+; GFX10-NEXT: v_lshlrev_b32_sdwa v3, v0, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX10-NEXT: v_and_or_b32 v3, v4, v2, v3
 ; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: ds_cmpst_rtn_b32 v4, v1, v2, v4
+; GFX10-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3
 ; GFX10-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2
-; GFX10-NEXT: v_mov_b32_e32 v2, v4
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
 ; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4
 ; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4
 ; GFX10-NEXT: s_cbranch_execnz .LBB16_1
@@ -4182,20 +4186,20 @@ define void @local_atomic_fmin_noret_bf16(ptr addrspace(3) %ptr) nounwind {
 ; GFX90A-NEXT: .LBB16_1: ; %atomicrmw.start
 ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_lshrrev_b32_sdwa v4, v0, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; GFX90A-NEXT: v_min_f32_e32 v4, 4.0, v4
-; GFX90A-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX90A-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX90A-NEXT: v_add3_u32 v5, v5, v4, s6
-; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v4, v4
-; GFX90A-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc
-; GFX90A-NEXT: v_lshlrev_b32_sdwa v4, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
-; GFX90A-NEXT: v_and_or_b32 v4, v3, v2, v4
-; GFX90A-NEXT: ds_cmpst_rtn_b32 v4, v1, v3, v4
+; GFX90A-NEXT: v_mov_b32_e32 v4, v3
+; GFX90A-NEXT: v_lshrrev_b32_sdwa v3, v0, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX90A-NEXT: v_min_f32_e32 v3, 4.0, v3
+; GFX90A-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX90A-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX90A-NEXT: v_add3_u32 v5, v5, v3, s6
+; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
+; GFX90A-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc
+; GFX90A-NEXT: v_lshlrev_b32_sdwa v3, v0, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX90A-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX90A-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3
 ; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
 ; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX90A-NEXT: v_mov_b32_e32 v3, v4
 ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5]
 ; GFX90A-NEXT: s_cbranch_execnz .LBB16_1
 ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -4217,20 +4221,20 @@ define void @local_atomic_fmin_noret_bf16(ptr addrspace(3) %ptr) nounwind {
 ; GFX908-NEXT: .LBB16_1: ; %atomicrmw.start
 ; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_lshrrev_b32_sdwa v4, v0, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; GFX908-NEXT: v_min_f32_e32 v4, 4.0, v4
-; GFX908-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX908-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX908-NEXT: v_add3_u32 v5, v5, v4, s6
-; GFX908-NEXT: v_cmp_u_f32_e32 vcc, v4, v4
-; GFX908-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc
-; GFX908-NEXT: v_lshlrev_b32_sdwa v4, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
-; GFX908-NEXT: v_and_or_b32 v4, v3, v2, v4
-; GFX908-NEXT: ds_cmpst_rtn_b32 v4, v1, v3, v4
+; GFX908-NEXT: v_mov_b32_e32 v4, v3
+; GFX908-NEXT: v_lshrrev_b32_sdwa v3, v0, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX908-NEXT: v_min_f32_e32 v3, 4.0, v3
+; GFX908-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX908-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX908-NEXT: v_add3_u32 v5, v5, v3, s6
+; GFX908-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
+; GFX908-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc
+; GFX908-NEXT: v_lshlrev_b32_sdwa v3, v0, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX908-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX908-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3
 ; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
 ; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX908-NEXT: v_mov_b32_e32 v3, v4
 ; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5]
 ; GFX908-NEXT: s_cbranch_execnz .LBB16_1
 ; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -4252,22 +4256,22 @@ define void @local_atomic_fmin_noret_bf16(ptr addrspace(3) %ptr) nounwind {
 ; GFX8-NEXT: .LBB16_1: ; %atomicrmw.start
 ; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_lshrrev_b32_sdwa v4, v0, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; GFX8-NEXT: v_min_f32_e32 v4, 4.0, v4
-; GFX8-NEXT: v_bfe_u32 v6, v4, 16, 1
-; GFX8-NEXT: v_add_u32_e32 v6, vcc, v6, v4
+; GFX8-NEXT: v_mov_b32_e32 v4, v3
+; GFX8-NEXT: v_lshrrev_b32_sdwa v3, v0, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX8-NEXT: v_min_f32_e32 v3, 4.0, v3
+; GFX8-NEXT: v_bfe_u32 v6, v3, 16, 1
+; GFX8-NEXT: v_add_u32_e32 v6, vcc, v6, v3
 ; GFX8-NEXT: v_add_u32_e32 v6, vcc, 0x7fff, v6
-; GFX8-NEXT: v_or_b32_e32 v7, 0x400000, v4
-; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v4, v4
-; GFX8-NEXT: v_cndmask_b32_e32 v4, v6, v7, vcc
-; GFX8-NEXT: v_and_b32_e32 v5, v3, v2
-; GFX8-NEXT: v_lshlrev_b32_sdwa v4, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
-; GFX8-NEXT: v_or_b32_e32 v4, v5, v4
-; GFX8-NEXT: ds_cmpst_rtn_b32 v4, v1, v3, v4
+; GFX8-NEXT: v_or_b32_e32 v7, 0x400000, v3
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
+; GFX8-NEXT: v_cndmask_b32_e32 v3, v6, v7, vcc
+; GFX8-NEXT: v_and_b32_e32 v5, v4, v2
+; GFX8-NEXT: v_lshlrev_b32_sdwa v3, v0, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_or_b32_e32 v3, v5, v3
+; GFX8-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3
 ; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
 ; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX8-NEXT: v_mov_b32_e32 v3, v4
 ; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
 ; GFX8-NEXT: s_cbranch_execnz .LBB16_1
 ; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -4288,19 +4292,19 @@ define void @local_atomic_fmin_noret_bf16(ptr addrspace(3) %ptr) nounwind {
 ; GFX7-NEXT: .LBB16_1: ; %atomicrmw.start
 ; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_lshrrev_b32_e32 v4, v0, v3
-; GFX7-NEXT: v_lshlrev_b32_e32 v4, 16, v4
-; GFX7-NEXT: v_mul_f32_e32 v4, 1.0, v4
-; GFX7-NEXT: v_min_f32_e32 v4, 4.0, v4
-; GFX7-NEXT: v_lshrrev_b32_e32 v4, 16, v4
-; GFX7-NEXT: v_and_b32_e32 v5, v3, v2
-; GFX7-NEXT: v_lshlrev_b32_e32 v4, v0, v4
-; GFX7-NEXT: v_or_b32_e32 v4, v5, v4
-; GFX7-NEXT: ds_cmpst_rtn_b32 v4, v1, v3, v4
+; GFX7-NEXT: v_mov_b32_e32 v4, v3
+; GFX7-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX7-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX7-NEXT: v_mul_f32_e32 v3, 1.0, v3
+; GFX7-NEXT: v_min_f32_e32 v3, 4.0, v3
+; GFX7-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX7-NEXT: v_and_b32_e32 v5, v4, v2
+; GFX7-NEXT: v_lshlrev_b32_e32 v3, v0, v3
+; GFX7-NEXT: v_or_b32_e32 v3, v5, v3
+; GFX7-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3
 ; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
 ; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX7-NEXT: v_mov_b32_e32 v3, v4
 ; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5]
 ; GFX7-NEXT: s_cbranch_execnz .LBB16_1
 ; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -4321,19 +4325,19 @@ define void @local_atomic_fmin_noret_bf16(ptr addrspace(3) %ptr) nounwind {
 ; GFX6-NEXT: .LBB16_1: ; %atomicrmw.start
 ; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_lshrrev_b32_e32 v4, v0, v3
-; GFX6-NEXT: v_lshlrev_b32_e32 v4, 16, v4
-; GFX6-NEXT: v_mul_f32_e32 v4, 1.0, v4
-; GFX6-NEXT: v_min_f32_e32 v4, 4.0, v4
-; GFX6-NEXT: v_lshrrev_b32_e32 v4, 16, v4
-; GFX6-NEXT: v_and_b32_e32 v5, v3, v2
-; GFX6-NEXT: v_lshlrev_b32_e32 v4, v0, v4
-; GFX6-NEXT: v_or_b32_e32 v4, v5, v4
-; GFX6-NEXT: ds_cmpst_rtn_b32 v4, v1, v3, v4
+; GFX6-NEXT: v_mov_b32_e32 v4, v3
+; GFX6-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX6-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX6-NEXT: v_mul_f32_e32 v3, 1.0, v3
+; GFX6-NEXT: v_min_f32_e32 v3, 4.0, v3
+; GFX6-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX6-NEXT: v_and_b32_e32 v5, v4, v2
+; GFX6-NEXT: v_lshlrev_b32_e32 v3, v0, v3
+; GFX6-NEXT: v_or_b32_e32 v3, v5, v3
+; GFX6-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3
 ; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
 ; GFX6-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX6-NEXT: v_mov_b32_e32 v3, v4
 ; GFX6-NEXT: s_andn2_b64 exec, exec, s[4:5]
 ; GFX6-NEXT: s_cbranch_execnz .LBB16_1
 ; GFX6-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -4364,29 +4368,30 @@ define void @local_atomic_fmin_noret_bf16__offset(ptr addrspace(3) %ptr) nounwin
 ; GFX12-TRUE16-NEXT: .LBB17_1: ; %atomicrmw.start
 ; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
-; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v4, v1, v3
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v4, v3
 ; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 16, v4
-; GFX12-TRUE16-NEXT: v_min_num_f32_e32 v4, 4.0, v4
-; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX12-TRUE16-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX12-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX12-TRUE16-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_min_num_f32_e32 v3, 4.0, v3
+; GFX12-TRUE16-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
 ; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
-; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
 ; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.h, 0
-; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.l, v4.h
-; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v4, v1, v5
-; GFX12-TRUE16-NEXT: v_and_or_b32 v4, v3, v2, v4
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.l, v3.h
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v1, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_or_b32 v3, v4, v2, v3
 ; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
-; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3
+; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4
 ; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
 ; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SE
-; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX12-TRUE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
 ; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
 ; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
 ; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
@@ -4417,28 +4422,29 @@ define void @local_atomic_fmin_noret_bf16__offset(ptr addrspace(3) %ptr) nounwin
 ; GFX12-FAKE16-NEXT: .LBB17_1: ; %atomicrmw.start
 ; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
-; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v4, v1, v3
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v4, v3
 ; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v4
-; GFX12-FAKE16-NEXT: v_min_num_f32_e32 v4, 4.0, v4
-; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX12-FAKE16-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX12-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX12-FAKE16-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
-; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3
 ; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo
-; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v4
+; GFX12-FAKE16-NEXT: v_min_num_f32_e32 v3, 4.0, v3
+; GFX12-FAKE16-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
 ; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, v1, v4
-; GFX12-FAKE16-NEXT: v_and_or_b32 v4, v3, v2, v4
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v1, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_or_b32 v3, v4, v2, v3
 ; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
-; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3
+; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4
 ; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
 ; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SE
-; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX12-FAKE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
 ; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
 ; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
 ; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
@@ -4465,22 +4471,22 @@ define void @local_atomic_fmin_noret_bf16__offset(ptr addrspace(3) %ptr) nounwin
 ; GFX942-NEXT: .LBB17_1: ; %atomicrmw.start
 ; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_lshrrev_b32_sdwa v4, v1, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX942-NEXT: v_mov_b32_e32 v4, v3
+; GFX942-NEXT: v_lshrrev_b32_sdwa v3, v1, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
 ; GFX942-NEXT: s_nop 0
-; GFX942-NEXT: v_min_f32_e32 v4, 4.0, v4
-; GFX942-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX942-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX942-NEXT: v_add3_u32 v5, v5, v4, s2
-; GFX942-NEXT: v_cmp_u_f32_e32 vcc, v4, v4
+; GFX942-NEXT: v_min_f32_e32 v3, 4.0, v3
+; GFX942-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX942-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX942-NEXT: v_add3_u32 v5, v5, v3, s2
+; GFX942-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
 ; GFX942-NEXT: s_nop 1
-; GFX942-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc
-; GFX942-NEXT: v_lshlrev_b32_sdwa v4, v1, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
-; GFX942-NEXT: v_and_or_b32 v4, v3, v2, v4
-; GFX942-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX942-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc
+; GFX942-NEXT: v_lshlrev_b32_sdwa v3, v1, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX942-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX942-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
 ; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
 ; GFX942-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
-; GFX942-NEXT: v_mov_b32_e32 v3, v4
 ; GFX942-NEXT: s_andn2_b64 exec, exec, s[0:1]
 ; GFX942-NEXT: s_cbranch_execnz .LBB17_1
 ; GFX942-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -4504,28 +4510,29 @@ define void @local_atomic_fmin_noret_bf16__offset(ptr addrspace(3) %ptr) nounwin
 ; GFX11-TRUE16-NEXT: .LBB17_1: ; %atomicrmw.start
 ; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v4, v1, v3
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v3
 ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 16, v4
-; GFX11-TRUE16-NEXT: v_min_f32_e32 v4, 4.0, v4
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-TRUE16-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, 0
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v4.h
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3
 ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v4, v1, v5
-; GFX11-TRUE16-NEXT: v_and_or_b32 v4, v3, v2, v4
+; GFX11-TRUE16-NEXT: v_min_f32_e32 v3, 4.0, v3
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, 0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v3.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v1, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_or_b32 v3, v4, v2, v3
 ; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3
+; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4
 ; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX11-TRUE16-NEXT: buffer_gl0_inv
-; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
 ; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
 ; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
 ; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -4551,27 +4558,28 @@ define void @local_atomic_fmin_noret_bf16__offset(ptr addrspace(3) %ptr) nounwin
 ; GFX11-FAKE16-NEXT: .LBB17_1: ; %atomicrmw.start
 ; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, v1, v3
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v3
 ; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v4
-; GFX11-FAKE16-NEXT: v_min_f32_e32 v4, 4.0, v4
-; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11-FAKE16-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-FAKE16-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3
 ; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo
-; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v4
+; GFX11-FAKE16-NEXT: v_min_f32_e32 v3, 4.0, v3
+; GFX11-FAKE16-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
 ; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, v1, v4
-; GFX11-FAKE16-NEXT: v_and_or_b32 v4, v3, v2, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v1, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_or_b32 v3, v4, v2, v3
 ; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3
+; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4
 ; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX11-FAKE16-NEXT: buffer_gl0_inv
-; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
 ; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
 ; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
 ; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -4594,21 +4602,21 @@ define void @local_atomic_fmin_noret_bf16__offset(ptr addrspace(3) %ptr) nounwin
 ; GFX10-NEXT: .LBB17_1: ; %atomicrmw.start
 ; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX10-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-NEXT: v_lshrrev_b32_sdwa v4, v1, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; GFX10-NEXT: v_min_f32_e32 v4, 4.0, v4
-; GFX10-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX10-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX10-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
-; GFX10-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo
-; GFX10-NEXT: v_lshlrev_b32_sdwa v4, v1, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
-; GFX10-NEXT: v_and_or_b32 v4, v3, v2, v4
+; GFX10-NEXT: v_mov_b32_e32 v4, v3
+; GFX10-NEXT: v_lshrrev_b32_sdwa v3, v1, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX10-NEXT: v_min_f32_e32 v3, 4.0, v3
+; GFX10-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX10-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX10-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
+; GFX10-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
+; GFX10-NEXT: v_lshlrev_b32_sdwa v3, v1, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX10-NEXT: v_and_or_b32 v3, v4, v2, v3
 ; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX10-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
 ; GFX10-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX10-NEXT: v_mov_b32_e32 v3, v4
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
 ; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4
 ; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4
 ; GFX10-NEXT: s_cbranch_execnz .LBB17_1
@@ -4632,20 +4640,20 @@ define void @local_atomic_fmin_noret_bf16__offset(ptr addrspace(3) %ptr) nounwin
 ; GFX90A-NEXT: .LBB17_1: ; %atomicrmw.start
 ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_lshrrev_b32_sdwa v4, v1, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; GFX90A-NEXT: v_min_f32_e32 v4, 4.0, v4
-; GFX90A-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX90A-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX90A-NEXT: v_add3_u32 v5, v5, v4, s6
-; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v4, v4
-; GFX90A-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc
-; GFX90A-NEXT: v_lshlrev_b32_sdwa v4, v1, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
-; GFX90A-NEXT: v_and_or_b32 v4, v3, v2, v4
-; GFX90A-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX90A-NEXT: v_mov_b32_e32 v4, v3
+; GFX90A-NEXT: v_lshrrev_b32_sdwa v3, v1, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX90A-NEXT: v_min_f32_e32 v3, 4.0, v3
+; GFX90A-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX90A-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX90A-NEXT: v_add3_u32 v5, v5, v3, s6
+; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
+; GFX90A-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc
+; GFX90A-NEXT: v_lshlrev_b32_sdwa v3, v1, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX90A-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX90A-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
 ; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
 ; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX90A-NEXT: v_mov_b32_e32 v3, v4
 ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5]
 ; GFX90A-NEXT: s_cbranch_execnz .LBB17_1
 ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -4668,20 +4676,20 @@ define void @local_atomic_fmin_noret_bf16__offset(ptr addrspace(3) %ptr) nounwin
 ; GFX908-NEXT: .LBB17_1: ; %atomicrmw.start
 ; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_lshrrev_b32_sdwa v4, v1, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; GFX908-NEXT: v_min_f32_e32 v4, 4.0, v4
-; GFX908-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX908-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX908-NEXT: v_add3_u32 v5, v5, v4, s6
-; GFX908-NEXT: v_cmp_u_f32_e32 vcc, v4, v4
-; GFX908-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc
-; GFX908-NEXT: v_lshlrev_b32_sdwa v4, v1, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
-; GFX908-NEXT: v_and_or_b32 v4, v3, v2, v4
-; GFX908-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX908-NEXT: v_mov_b32_e32 v4, v3
+; GFX908-NEXT: v_lshrrev_b32_sdwa v3, v1, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX908-NEXT: v_min_f32_e32 v3, 4.0, v3
+; GFX908-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX908-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX908-NEXT: v_add3_u32 v5, v5, v3, s6
+; GFX908-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
+; GFX908-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc
+; GFX908-NEXT: v_lshlrev_b32_sdwa v3, v1, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX908-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX908-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
 ; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
 ; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX908-NEXT: v_mov_b32_e32 v3, v4
 ; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5]
 ; GFX908-NEXT: s_cbranch_execnz .LBB17_1
 ; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -4704,22 +4712,22 @@ define void @local_atomic_fmin_noret_bf16__offset(ptr addrspace(3) %ptr) nounwin
 ; GFX8-NEXT: .LBB17_1: ; %atomicrmw.start
 ; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_lshrrev_b32_sdwa v4, v1, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; GFX8-NEXT: v_min_f32_e32 v4, 4.0, v4
-; GFX8-NEXT: v_bfe_u32 v6, v4, 16, 1
-; GFX8-NEXT: v_add_u32_e32 v6, vcc, v6, v4
+; GFX8-NEXT: v_mov_b32_e32 v4, v3
+; GFX8-NEXT: v_lshrrev_b32_sdwa v3, v1, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX8-NEXT: v_min_f32_e32 v3, 4.0, v3
+; GFX8-NEXT: v_bfe_u32 v6, v3, 16, 1
+; GFX8-NEXT: v_add_u32_e32 v6, vcc, v6, v3
 ; GFX8-NEXT: v_add_u32_e32 v6, vcc, 0x7fff, v6
-; GFX8-NEXT: v_or_b32_e32 v7, 0x400000, v4
-; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v4, v4
-; GFX8-NEXT: v_cndmask_b32_e32 v4, v6, v7, vcc
-; GFX8-NEXT: v_and_b32_e32 v5, v3, v2
-; GFX8-NEXT: v_lshlrev_b32_sdwa v4, v1, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
-; GFX8-NEXT: v_or_b32_e32 v4, v5, v4
-; GFX8-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX8-NEXT: v_or_b32_e32 v7, 0x400000, v3
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
+; GFX8-NEXT: v_cndmask_b32_e32 v3, v6, v7, vcc
+; GFX8-NEXT: v_and_b32_e32 v5, v4, v2
+; GFX8-NEXT: v_lshlrev_b32_sdwa v3, v1, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_or_b32_e32 v3, v5, v3
+; GFX8-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
 ; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
 ; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX8-NEXT: v_mov_b32_e32 v3, v4
 ; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
 ; GFX8-NEXT: s_cbranch_execnz .LBB17_1
 ; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -4741,19 +4749,19 @@ define void @local_atomic_fmin_noret_bf16__offset(ptr addrspace(3) %ptr) nounwin
 ; GFX7-NEXT: .LBB17_1: ; %atomicrmw.start
 ; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_lshrrev_b32_e32 v4, v1, v3
-; GFX7-NEXT: v_lshlrev_b32_e32 v4, 16, v4
-; GFX7-NEXT: v_mul_f32_e32 v4, 1.0, v4
-; GFX7-NEXT: v_min_f32_e32 v4, 4.0, v4
-; GFX7-NEXT: v_lshrrev_b32_e32 v4, 16, v4
-; GFX7-NEXT: v_and_b32_e32 v5, v3, v2
-; GFX7-NEXT: v_lshlrev_b32_e32 v4, v1, v4
-; GFX7-NEXT: v_or_b32_e32 v4, v5, v4
-; GFX7-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX7-NEXT: v_mov_b32_e32 v4, v3
+; GFX7-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX7-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX7-NEXT: v_mul_f32_e32 v3, 1.0, v3
+; GFX7-NEXT: v_min_f32_e32 v3, 4.0, v3
+; GFX7-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX7-NEXT: v_and_b32_e32 v5, v4, v2
+; GFX7-NEXT: v_lshlrev_b32_e32 v3, v1, v3
+; GFX7-NEXT: v_or_b32_e32 v3, v5, v3
+; GFX7-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
 ; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
 ; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX7-NEXT: v_mov_b32_e32 v3, v4
 ; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5]
 ; GFX7-NEXT: s_cbranch_execnz .LBB17_1
 ; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -4775,19 +4783,19 @@ define void @local_atomic_fmin_noret_bf16__offset(ptr addrspace(3) %ptr) nounwin
 ; GFX6-NEXT: .LBB17_1: ; %atomicrmw.start
 ; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_lshrrev_b32_e32 v4, v1, v3
-; GFX6-NEXT: v_lshlrev_b32_e32 v4, 16, v4
-; GFX6-NEXT: v_mul_f32_e32 v4, 1.0, v4
-; GFX6-NEXT: v_min_f32_e32 v4, 4.0, v4
-; GFX6-NEXT: v_lshrrev_b32_e32 v4, 16, v4
-; GFX6-NEXT: v_and_b32_e32 v5, v3, v2
-; GFX6-NEXT: v_lshlrev_b32_e32 v4, v1, v4
-; GFX6-NEXT: v_or_b32_e32 v4, v5, v4
-; GFX6-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX6-NEXT: v_mov_b32_e32 v4, v3
+; GFX6-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX6-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX6-NEXT: v_mul_f32_e32 v3, 1.0, v3
+; GFX6-NEXT: v_min_f32_e32 v3, 4.0, v3
+; GFX6-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX6-NEXT: v_and_b32_e32 v5, v4, v2
+; GFX6-NEXT: v_lshlrev_b32_e32 v3, v1, v3
+; GFX6-NEXT: v_or_b32_e32 v3, v5, v3
+; GFX6-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
 ; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
 ; GFX6-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX6-NEXT: v_mov_b32_e32 v3, v4
 ; GFX6-NEXT: s_andn2_b64 exec, exec, s[4:5]
 ; GFX6-NEXT: s_cbranch_execnz .LBB17_1
 ; GFX6-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -5189,26 +5197,27 @@ define void @local_atomic_fmin_noret_bf16__offset__align4(ptr addrspace(3) %ptr)
 ; GFX12-TRUE16-NEXT: .LBB19_1: ; %atomicrmw.start
 ; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
-; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v1
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v2, v1
 ; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-TRUE16-NEXT: v_min_num_f32_e32 v2, 4.0, v2
-; GFX12-TRUE16-NEXT: v_bfe_u32 v3, v2, 16, 1
-; GFX12-TRUE16-NEXT: v_or_b32_e32 v4, 0x400000, v2
-; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX12-TRUE16-NEXT: v_add3_u32 v3, v3, v2, 0x7fff
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 16, v2
+; GFX12-TRUE16-NEXT: v_min_num_f32_e32 v1, 4.0, v1
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX12-TRUE16-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v4, 0x400000, v1
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX12-TRUE16-NEXT: v_add3_u32 v3, v3, v1, 0x7fff
 ; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
-; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v2, v3, v4, vcc_lo
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc_lo
 ; GFX12-TRUE16-NEXT: v_mov_b16_e32 v3.h, 0
-; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-TRUE16-NEXT: v_mov_b16_e32 v3.l, v2.h
-; GFX12-TRUE16-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v3
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v3.l, v1.h
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v3
 ; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
-; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v1 offset:65534
+; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:65534
 ; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
 ; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SE
-; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
-; GFX12-TRUE16-NEXT: v_mov_b32_e32 v1, v2
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
 ; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
 ; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
 ; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
@@ -5231,25 +5240,26 @@ define void @local_atomic_fmin_noret_bf16__offset__align4(ptr addrspace(3) %ptr)
 ; GFX12-FAKE16-NEXT: .LBB19_1: ; %atomicrmw.start
 ; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
-; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v1
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v2, v1
 ; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_min_num_f32_e32 v2, 4.0, v2
-; GFX12-FAKE16-NEXT: v_bfe_u32 v3, v2, 16, 1
-; GFX12-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v2
-; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_add3_u32 v3, v3, v2, 0x7fff
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v1, 16, v2
+; GFX12-FAKE16-NEXT: v_min_num_f32_e32 v1, 4.0, v1
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX12-FAKE16-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v1
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX12-FAKE16-NEXT: v_add3_u32 v3, v3, v1, 0x7fff
 ; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v2, v3, v4, vcc_lo
 ; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX12-FAKE16-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v2
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc_lo
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v1
 ; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
-; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v1 offset:65534
+; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:65534
 ; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
 ; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SE
-; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
-; GFX12-FAKE16-NEXT: v_mov_b32_e32 v1, v2
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
 ; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
 ; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
 ; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
@@ -5270,21 +5280,21 @@ define void @local_atomic_fmin_noret_bf16__offset__align4(ptr addrspace(3) %ptr)
 ; GFX942-NEXT: .LBB19_1: ; %atomicrmw.start
 ; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_lshlrev_b32_e32 v2, 16, v1
-; GFX942-NEXT: v_min_f32_e32 v2, 4.0, v2
-; GFX942-NEXT: v_bfe_u32 v3, v2, 16, 1
-; GFX942-NEXT: v_or_b32_e32 v4, 0x400000, v2
-; GFX942-NEXT: v_add3_u32 v3, v3, v2, s2
-; GFX942-NEXT: v_cmp_u_f32_e32 vcc, v2, v2
+; GFX942-NEXT: v_mov_b32_e32 v2, v1
+; GFX942-NEXT: v_lshlrev_b32_e32 v1, 16, v2
+; GFX942-NEXT: v_min_f32_e32 v1, 4.0, v1
+; GFX942-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX942-NEXT: v_or_b32_e32 v4, 0x400000, v1
+; GFX942-NEXT: v_add3_u32 v3, v3, v1, s2
+; GFX942-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
 ; GFX942-NEXT: s_nop 1
-; GFX942-NEXT: v_cndmask_b32_e32 v2, v3, v4, vcc
-; GFX942-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX942-NEXT: v_and_or_b32 v2, v1, s3, v2
-; GFX942-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 offset:65534
+; GFX942-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc
+; GFX942-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX942-NEXT: v_and_or_b32 v1, v2, s3, v1
+; GFX942-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 offset:65534
 ; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
 ; GFX942-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
-; GFX942-NEXT: v_mov_b32_e32 v1, v2
 ; GFX942-NEXT: s_andn2_b64 exec, exec, s[0:1]
 ; GFX942-NEXT: s_cbranch_execnz .LBB19_1
 ; GFX942-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -5300,25 +5310,26 @@ define void @local_atomic_fmin_noret_bf16__offset__align4(ptr addrspace(3) %ptr)
 ; GFX11-TRUE16-NEXT: .LBB19_1: ; %atomicrmw.start
 ; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v2, v1
 ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_min_f32_e32 v2, 4.0, v2
-; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v2, 16, 1
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, 0x400000, v2
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_add3_u32 v3, v3, v2, 0x7fff
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v3, v4, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 16, v2
+; GFX11-TRUE16-NEXT: v_min_f32_e32 v1, 4.0, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add3_u32 v3, v3, v1, 0x7fff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc_lo
 ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, 0
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v2.h
-; GFX11-TRUE16-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v3
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v1.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v3
 ; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v1 offset:65534
+; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:65534
 ; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX11-TRUE16-NEXT: buffer_gl0_inv
-; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v1, v2
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
 ; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
 ; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
 ; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -5336,24 +5347,25 @@ define void @local_atomic_fmin_noret_bf16__offset__align4(ptr addrspace(3) %ptr)
 ; GFX11-FAKE16-NEXT: .LBB19_1: ; %atomicrmw.start
 ; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v1
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v2, v1
 ; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_min_f32_e32 v2, 4.0, v2
-; GFX11-FAKE16-NEXT: v_bfe_u32 v3, v2, 16, 1
-; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v2
-; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_add3_u32 v3, v3, v2, 0x7fff
-; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v3, v4, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v1, 16, v2
+; GFX11-FAKE16-NEXT: v_min_f32_e32 v1, 4.0, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-FAKE16-NEXT: v_add3_u32 v3, v3, v1, 0x7fff
 ; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX11-FAKE16-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v2
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v1
 ; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v1 offset:65534
+; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:65534
 ; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX11-FAKE16-NEXT: buffer_gl0_inv
-; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
-; GFX11-FAKE16-NEXT: v_mov_b32_e32 v1, v2
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
 ; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
 ; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
 ; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -5370,21 +5382,21 @@ define void @local_atomic_fmin_noret_bf16__offset__align4(ptr addrspace(3) %ptr)
 ; GFX10-NEXT: .LBB19_1: ; %atomicrmw.start
 ; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX10-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-NEXT: v_lshlrev_b32_e32 v2, 16, v1
-; GFX10-NEXT: v_min_f32_e32 v2, 4.0, v2
-; GFX10-NEXT: v_bfe_u32 v3, v2, 16, 1
-; GFX10-NEXT: v_or_b32_e32 v4, 0x400000, v2
-; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX10-NEXT: v_add3_u32 v3, v3, v2, 0x7fff
-; GFX10-NEXT: v_cndmask_b32_e32 v2, v3, v4, vcc_lo
-; GFX10-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX10-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v2
+; GFX10-NEXT: v_mov_b32_e32 v2, v1
+; GFX10-NEXT: v_lshlrev_b32_e32 v1, 16, v2
+; GFX10-NEXT: v_min_f32_e32 v1, 4.0, v1
+; GFX10-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX10-NEXT: v_or_b32_e32 v4, 0x400000, v1
+; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX10-NEXT: v_add3_u32 v3, v3, v1, 0x7fff
+; GFX10-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc_lo
+; GFX10-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX10-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v1
 ; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 offset:65534
+; GFX10-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 offset:65534
 ; GFX10-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
-; GFX10-NEXT: v_mov_b32_e32 v1, v2
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
 ; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4
 ; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4
 ; GFX10-NEXT: s_cbranch_execnz .LBB19_1
@@ -5402,20 +5414,20 @@ define void @local_atomic_fmin_noret_bf16__offset__align4(ptr addrspace(3) %ptr)
 ; GFX90A-NEXT: .LBB19_1: ; %atomicrmw.start
 ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_lshlrev_b32_e32 v2, 16, v1
-; GFX90A-NEXT: v_min_f32_e32 v2, 4.0, v2
-; GFX90A-NEXT: v_bfe_u32 v3, v2, 16, 1
-; GFX90A-NEXT: v_or_b32_e32 v4, 0x400000, v2
-; GFX90A-NEXT: v_add3_u32 v3, v3, v2, s6
-; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v2, v2
-; GFX90A-NEXT: v_cndmask_b32_e32 v2, v3, v4, vcc
-; GFX90A-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX90A-NEXT: v_and_or_b32 v2, v1, s7, v2
-; GFX90A-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 offset:65534
+; GFX90A-NEXT: v_mov_b32_e32 v2, v1
+; GFX90A-NEXT: v_lshlrev_b32_e32 v1, 16, v2
+; GFX90A-NEXT: v_min_f32_e32 v1, 4.0, v1
+; GFX90A-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX90A-NEXT: v_or_b32_e32 v4, 0x400000, v1
+; GFX90A-NEXT: v_add3_u32 v3, v3, v1, s6
+; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
+; GFX90A-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc
+; GFX90A-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX90A-NEXT: v_and_or_b32 v1, v2, s7, v1
+; GFX90A-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 offset:65534
 ; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
 ; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX90A-NEXT: v_mov_b32_e32 v1, v2
 ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5]
 ; GFX90A-NEXT: s_cbranch_execnz .LBB19_1
 ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -5432,20 +5444,20 @@ define void @local_atomic_fmin_noret_bf16__offset__align4(ptr addrspace(3) %ptr)
 ; GFX908-NEXT: .LBB19_1: ; %atomicrmw.start
 ; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_lshlrev_b32_e32 v2, 16, v1
-; GFX908-NEXT: v_min_f32_e32 v2, 4.0, v2
-; GFX908-NEXT: v_bfe_u32 v3, v2, 16, 1
-; GFX908-NEXT: v_or_b32_e32 v4, 0x400000, v2
-; GFX908-NEXT: v_add3_u32 v3, v3, v2, s6
-; GFX908-NEXT: v_cmp_u_f32_e32 vcc, v2, v2
-; GFX908-NEXT: v_cndmask_b32_e32 v2, v3, v4, vcc
-; GFX908-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX908-NEXT: v_and_or_b32 v2, v1, s7, v2
-; GFX908-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 offset:65534
+; GFX908-NEXT: v_mov_b32_e32 v2, v1
+; GFX908-NEXT: v_lshlrev_b32_e32 v1, 16, v2
+; GFX908-NEXT: v_min_f32_e32 v1, 4.0, v1
+; GFX908-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX908-NEXT: v_or_b32_e32 v4, 0x400000, v1
+; GFX908-NEXT: v_add3_u32 v3, v3, v1, s6
+; GFX908-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
+; GFX908-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc
+; GFX908-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX908-NEXT: v_and_or_b32 v1, v2, s7, v1
+; GFX908-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 offset:65534
 ; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
 ; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX908-NEXT: v_mov_b32_e32 v1, v2
 ; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5]
 ; GFX908-NEXT: s_cbranch_execnz .LBB19_1
 ; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -5461,21 +5473,21 @@ define void @local_atomic_fmin_noret_bf16__offset__align4(ptr addrspace(3) %ptr)
 ; GFX8-NEXT: .LBB19_1: ; %atomicrmw.start
 ; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_lshlrev_b32_e32 v2, 16, v1
-; GFX8-NEXT: v_min_f32_e32 v2, 4.0, v2
-; GFX8-NEXT: v_bfe_u32 v4, v2, 16, 1
-; GFX8-NEXT: v_add_u32_e32 v4, vcc, v4, v2
+; GFX8-NEXT: v_mov_b32_e32 v2, v1
+; GFX8-NEXT: v_lshlrev_b32_e32 v1, 16, v2
+; GFX8-NEXT: v_min_f32_e32 v1, 4.0, v1
+; GFX8-NEXT: v_bfe_u32 v4, v1, 16, 1
+; GFX8-NEXT: v_add_u32_e32 v4, vcc, v4, v1
 ; GFX8-NEXT: v_add_u32_e32 v4, vcc, 0x7fff, v4
-; GFX8-NEXT: v_or_b32_e32 v5, 0x400000, v2
-; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v2, v2
-; GFX8-NEXT: v_and_b32_e32 v3, 0xffff0000, v1
-; GFX8-NEXT: v_cndmask_b32_e32 v2, v4, v5, vcc
-; GFX8-NEXT: v_or_b32_sdwa v2, v3, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
-; GFX8-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 offset:65534
+; GFX8-NEXT: v_or_b32_e32 v5, 0x400000, v1
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
+; GFX8-NEXT: v_and_b32_e32 v3, 0xffff0000, v2
+; GFX8-NEXT: v_cndmask_b32_e32 v1, v4, v5, vcc
+; GFX8-NEXT: v_or_b32_sdwa v1, v3, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 offset:65534
 ; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
 ; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX8-NEXT: v_mov_b32_e32 v1, v2
 ; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
 ; GFX8-NEXT: s_cbranch_execnz .LBB19_1
 ; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -5491,17 +5503,17 @@ define void @local_atomic_fmin_noret_bf16__offset__align4(ptr addrspace(3) %ptr)
 ; GFX7-NEXT: .LBB19_1: ; %atomicrmw.start
 ; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_lshlrev_b32_e32 v2, 16, v1
-; GFX7-NEXT: v_mul_f32_e32 v2, 1.0, v2
-; GFX7-NEXT: v_min_f32_e32 v2, 4.0, v2
-; GFX7-NEXT: v_and_b32_e32 v3, 0xffff0000, v1
-; GFX7-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX7-NEXT: v_or_b32_e32 v2, v3, v2
-; GFX7-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 offset:65534
+; GFX7-NEXT: v_mov_b32_e32 v2, v1
+; GFX7-NEXT: v_lshlrev_b32_e32 v1, 16, v2
+; GFX7-NEXT: v_mul_f32_e32 v1, 1.0, v1
+; GFX7-NEXT: v_min_f32_e32 v1, 4.0, v1
+; GFX7-NEXT: v_and_b32_e32 v3, 0xffff0000, v2
+; GFX7-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX7-NEXT: v_or_b32_e32 v1, v3, v1
+; GFX7-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 offset:65534
 ; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
 ; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX7-NEXT: v_mov_b32_e32 v1, v2
 ; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5]
 ; GFX7-NEXT: s_cbranch_execnz .LBB19_1
 ; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -5518,17 +5530,17 @@ define void @local_atomic_fmin_noret_bf16__offset__align4(ptr addrspace(3) %ptr)
 ; GFX6-NEXT: .LBB19_1: ; %atomicrmw.start
 ; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_lshlrev_b32_e32 v2, 16, v1
-; GFX6-NEXT: v_mul_f32_e32 v2, 1.0, v2
-; GFX6-NEXT: v_min_f32_e32 v2, 4.0, v2
-; GFX6-NEXT: v_and_b32_e32 v3, 0xffff0000, v1
-; GFX6-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX6-NEXT: v_or_b32_e32 v2, v3, v2
-; GFX6-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2
+; GFX6-NEXT: v_mov_b32_e32 v2, v1
+; GFX6-NEXT: v_lshlrev_b32_e32 v1, 16, v2
+; GFX6-NEXT: v_mul_f32_e32 v1, 1.0, v1
+; GFX6-NEXT: v_min_f32_e32 v1, 4.0, v1
+; GFX6-NEXT: v_and_b32_e32 v3, 0xffff0000, v2
+; GFX6-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX6-NEXT: v_or_b32_e32 v1, v3, v1
+; GFX6-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1
 ; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
 ; GFX6-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX6-NEXT: v_mov_b32_e32 v1, v2
 ; GFX6-NEXT: s_andn2_b64 exec, exec, s[4:5]
 ; GFX6-NEXT: s_cbranch_execnz .LBB19_1
 ; GFX6-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -6101,15 +6113,15 @@ define void @local_atomic_fmin_noret_v2f16(ptr addrspace(3) %ptr, <2 x half> %va
 ; GFX12-NEXT: .LBB22_1: ; %atomicrmw.start
 ; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX12-NEXT: s_wait_dscnt 0x0
-; GFX12-NEXT: v_pk_max_num_f16 v3, v2, v2
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_pk_min_num_f16 v3, v3, v1
+; GFX12-NEXT: v_mov_b32_e32 v3, v2
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_pk_max_num_f16 v2, v3, v3
+; GFX12-NEXT: v_pk_min_num_f16 v2, v2, v1
 ; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v2
+; GFX12-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v3
 ; GFX12-NEXT: s_wait_dscnt 0x0
 ; GFX12-NEXT: global_inv scope:SCOPE_SE
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v2
-; GFX12-NEXT: v_mov_b32_e32 v2, v3
+; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
 ; GFX12-NEXT: s_wait_alu 0xfffe
 ; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
 ; GFX12-NEXT: s_wait_alu 0xfffe
@@ -6129,14 +6141,14 @@ define void @local_atomic_fmin_noret_v2f16(ptr addrspace(3) %ptr, <2 x half> %va
 ; GFX942-NEXT: .LBB22_1: ; %atomicrmw.start
 ; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_pk_max_f16 v3, v2, v2
+; GFX942-NEXT: v_mov_b32_e32 v3, v2
+; GFX942-NEXT: v_pk_max_f16 v2, v3, v3
 ; GFX942-NEXT: s_nop 0
-; GFX942-NEXT: v_pk_min_f16 v3, v3, v1
-; GFX942-NEXT: ds_cmpst_rtn_b32 v3, v0, v2, v3
+; GFX942-NEXT: v_pk_min_f16 v2, v2, v1
+; GFX942-NEXT: ds_cmpst_rtn_b32 v2, v0, v3, v2
 ; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v3, v2
+; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3
 ; GFX942-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
-; GFX942-NEXT: v_mov_b32_e32 v2, v3
 ; GFX942-NEXT: s_andn2_b64 exec, exec, s[0:1]
 ; GFX942-NEXT: s_cbranch_execnz .LBB22_1
 ; GFX942-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -6152,15 +6164,15 @@ define void @local_atomic_fmin_noret_v2f16(ptr addrspace(3) %ptr, <2 x half> %va
 ; GFX11-NEXT: .LBB22_1: ; %atomicrmw.start
 ; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: v_pk_max_f16 v3, v2, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_pk_min_f16 v3, v3, v1
+; GFX11-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_pk_max_f16 v2, v3, v3
+; GFX11-NEXT: v_pk_min_f16 v2, v2, v1
 ; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v2
+; GFX11-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v3
 ; GFX11-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v2
-; GFX11-NEXT: v_mov_b32_e32 v2, v3
+; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
 ; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
 ; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
 ; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -6178,14 +6190,14 @@ define void @local_atomic_fmin_noret_v2f16(ptr addrspace(3) %ptr, <2 x half> %va
 ; GFX10-NEXT: .LBB22_1: ; %atomicrmw.start
 ; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX10-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-NEXT: v_pk_max_f16 v3, v2, v2
-; GFX10-NEXT: v_pk_min_f16 v3, v3, v1
+; GFX10-NEXT: v_mov_b32_e32 v3, v2
+; GFX10-NEXT: v_pk_max_f16 v2, v3, v3
+; GFX10-NEXT: v_pk_min_f16 v2, v2, v1
 ; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: ds_cmpst_rtn_b32 v3, v0, v2, v3
+; GFX10-NEXT: ds_cmpst_rtn_b32 v2, v0, v3, v2
 ; GFX10-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v2
-; GFX10-NEXT: v_mov_b32_e32 v2, v3
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
 ; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4
 ; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4
 ; GFX10-NEXT: s_cbranch_execnz .LBB22_1
@@ -6202,13 +6214,13 @@ define void @local_atomic_fmin_noret_v2f16(ptr addrspace(3) %ptr, <2 x half> %va
 ; GFX90A-NEXT: .LBB22_1: ; %atomicrmw.start
 ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_pk_max_f16 v3, v2, v2
-; GFX90A-NEXT: v_pk_min_f16 v3, v3, v1
-; GFX90A-NEXT: ds_cmpst_rtn_b32 v3, v0, v2, v3
+; GFX90A-NEXT: v_mov_b32_e32 v3, v2
+; GFX90A-NEXT: v_pk_max_f16 v2, v3, v3
+; GFX90A-NEXT: v_pk_min_f16 v2, v2, v1
+; GFX90A-NEXT: ds_cmpst_rtn_b32 v2, v0, v3, v2
 ; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v3, v2
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3
 ; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX90A-NEXT: v_mov_b32_e32 v2, v3
 ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5]
 ; GFX90A-NEXT: s_cbranch_execnz .LBB22_1
 ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -6224,13 +6236,13 @@ define void @local_atomic_fmin_noret_v2f16(ptr addrspace(3) %ptr, <2 x half> %va
 ; GFX908-NEXT: .LBB22_1: ; %atomicrmw.start
 ; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_pk_max_f16 v3, v2, v2
-; GFX908-NEXT: v_pk_min_f16 v3, v3, v1
-; GFX908-NEXT: ds_cmpst_rtn_b32 v3, v0, v2, v3
+; GFX908-NEXT: v_mov_b32_e32 v3, v2
+; GFX908-NEXT: v_pk_max_f16 v2, v3, v3
+; GFX908-NEXT: v_pk_min_f16 v2, v2, v1
+; GFX908-NEXT: ds_cmpst_rtn_b32 v2, v0, v3, v2
 ; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v3, v2
+; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3
 ; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX908-NEXT: v_mov_b32_e32 v2, v3
 ; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5]
 ; GFX908-NEXT: s_cbranch_execnz .LBB22_1
 ; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -6248,16 +6260,16 @@ define void @local_atomic_fmin_noret_v2f16(ptr addrspace(3) %ptr, <2 x half> %va
 ; GFX8-NEXT: .LBB22_1: ; %atomicrmw.start
 ; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_max_f16_sdwa v4, v3, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
-; GFX8-NEXT: v_max_f16_e32 v5, v3, v3
-; GFX8-NEXT: v_min_f16_sdwa v4, v4, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX8-NEXT: v_mov_b32_e32 v4, v3
+; GFX8-NEXT: v_max_f16_sdwa v3, v4, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT: v_max_f16_e32 v5, v4, v4
+; GFX8-NEXT: v_min_f16_sdwa v3, v3, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
 ; GFX8-NEXT: v_min_f16_e32 v5, v5, v1
-; GFX8-NEXT: v_or_b32_e32 v4, v5, v4
-; GFX8-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX8-NEXT: v_or_b32_e32 v3, v5, v3
+; GFX8-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
 ; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
 ; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX8-NEXT: v_mov_b32_e32 v3, v4
 ; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
 ; GFX8-NEXT: s_cbranch_execnz .LBB22_1
 ; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -6363,15 +6375,15 @@ define void @local_atomic_fmin_noret_v2f16__offset(ptr addrspace(3) %ptr, <2 x h
 ; GFX12-NEXT: .LBB23_1: ; %atomicrmw.start
 ; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX12-NEXT: s_wait_dscnt 0x0
-; GFX12-NEXT: v_pk_max_num_f16 v3, v2, v2
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_pk_min_num_f16 v3, v3, v1
+; GFX12-NEXT: v_mov_b32_e32 v3, v2
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_pk_max_num_f16 v2, v3, v3
+; GFX12-NEXT: v_pk_min_num_f16 v2, v2, v1
 ; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v2 offset:65532
+; GFX12-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v3 offset:65532
 ; GFX12-NEXT: s_wait_dscnt 0x0
 ; GFX12-NEXT: global_inv scope:SCOPE_SE
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v2
-; GFX12-NEXT: v_mov_b32_e32 v2, v3
+; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
 ; GFX12-NEXT: s_wait_alu 0xfffe
 ; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
 ; GFX12-NEXT: s_wait_alu 0xfffe
@@ -6391,14 +6403,14 @@ define void @local_atomic_fmin_noret_v2f16__offset(ptr addrspace(3) %ptr, <2 x h
 ; GFX942-NEXT: .LBB23_1: ; %atomicrmw.start
 ; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_pk_max_f16 v3, v2, v2
+; GFX942-NEXT: v_mov_b32_e32 v3, v2
+; GFX942-NEXT: v_pk_max_f16 v2, v3, v3
 ; GFX942-NEXT: s_nop 0
-; GFX942-NEXT: v_pk_min_f16 v3, v3, v1
-; GFX942-NEXT: ds_cmpst_rtn_b32 v3, v0, v2, v3 offset:65532
+; GFX942-NEXT: v_pk_min_f16 v2, v2, v1
+; GFX942-NEXT: ds_cmpst_rtn_b32 v2, v0, v3, v2 offset:65532
 ; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v3, v2
+; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3
 ; GFX942-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
-; GFX942-NEXT: v_mov_b32_e32 v2, v3
 ; GFX942-NEXT: s_andn2_b64 exec, exec, s[0:1]
 ; GFX942-NEXT: s_cbranch_execnz .LBB23_1
 ; GFX942-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -6414,15 +6426,15 @@ define void @local_atomic_fmin_noret_v2f16__offset(ptr addrspace(3) %ptr, <2 x h
 ; GFX11-NEXT: .LBB23_1: ; %atomicrmw.start
 ; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: v_pk_max_f16 v3, v2, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_pk_min_f16
v3, v3, v1 +; GFX11-NEXT: v_mov_b32_e32 v3, v2 +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX11-NEXT: v_pk_max_f16 v2, v3, v3 +; GFX11-NEXT: v_pk_min_f16 v2, v2, v1 ; GFX11-NEXT: s_waitcnt_vscnt null, 0x0 -; GFX11-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v2 offset:65532 +; GFX11-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v3 offset:65532 ; GFX11-NEXT: s_waitcnt lgkmcnt(0) ; GFX11-NEXT: buffer_gl0_inv -; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v2 -; GFX11-NEXT: v_mov_b32_e32 v2, v3 +; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3 ; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0 ; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0 @@ -6440,14 +6452,14 @@ define void @local_atomic_fmin_noret_v2f16__offset(ptr addrspace(3) %ptr, <2 x h ; GFX10-NEXT: .LBB23_1: ; %atomicrmw.start ; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX10-NEXT: s_waitcnt lgkmcnt(0) -; GFX10-NEXT: v_pk_max_f16 v3, v2, v2 -; GFX10-NEXT: v_pk_min_f16 v3, v3, v1 +; GFX10-NEXT: v_mov_b32_e32 v3, v2 +; GFX10-NEXT: v_pk_max_f16 v2, v3, v3 +; GFX10-NEXT: v_pk_min_f16 v2, v2, v1 ; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 -; GFX10-NEXT: ds_cmpst_rtn_b32 v3, v0, v2, v3 offset:65532 +; GFX10-NEXT: ds_cmpst_rtn_b32 v2, v0, v3, v2 offset:65532 ; GFX10-NEXT: s_waitcnt lgkmcnt(0) ; GFX10-NEXT: buffer_gl0_inv -; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v2 -; GFX10-NEXT: v_mov_b32_e32 v2, v3 +; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3 ; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4 ; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4 ; GFX10-NEXT: s_cbranch_execnz .LBB23_1 @@ -6464,13 +6476,13 @@ define void @local_atomic_fmin_noret_v2f16__offset(ptr addrspace(3) %ptr, <2 x h ; GFX90A-NEXT: .LBB23_1: ; %atomicrmw.start ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX90A-NEXT: s_waitcnt lgkmcnt(0) -; GFX90A-NEXT: v_pk_max_f16 v3, v2, v2 -; GFX90A-NEXT: v_pk_min_f16 v3, v3, v1 -; GFX90A-NEXT: ds_cmpst_rtn_b32 v3, v0, v2, v3 offset:65532 +; GFX90A-NEXT: v_mov_b32_e32 v3, v2 +; GFX90A-NEXT: v_pk_max_f16 v2, v3, v3 +; GFX90A-NEXT: v_pk_min_f16 v2, v2, v1 +; GFX90A-NEXT: ds_cmpst_rtn_b32 v2, v0, v3, v2 offset:65532 ; GFX90A-NEXT: s_waitcnt lgkmcnt(0) -; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v3, v2 +; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3 ; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5] -; GFX90A-NEXT: v_mov_b32_e32 v2, v3 ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX90A-NEXT: s_cbranch_execnz .LBB23_1 ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end @@ -6486,13 +6498,13 @@ define void @local_atomic_fmin_noret_v2f16__offset(ptr addrspace(3) %ptr, <2 x h ; GFX908-NEXT: .LBB23_1: ; %atomicrmw.start ; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX908-NEXT: s_waitcnt lgkmcnt(0) -; GFX908-NEXT: v_pk_max_f16 v3, v2, v2 -; GFX908-NEXT: v_pk_min_f16 v3, v3, v1 -; GFX908-NEXT: ds_cmpst_rtn_b32 v3, v0, v2, v3 offset:65532 +; GFX908-NEXT: v_mov_b32_e32 v3, v2 +; GFX908-NEXT: v_pk_max_f16 v2, v3, v3 +; GFX908-NEXT: v_pk_min_f16 v2, v2, v1 +; GFX908-NEXT: ds_cmpst_rtn_b32 v2, v0, v3, v2 offset:65532 ; GFX908-NEXT: s_waitcnt lgkmcnt(0) -; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v3, v2 +; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3 ; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5] -; GFX908-NEXT: v_mov_b32_e32 v2, v3 ; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX908-NEXT: s_cbranch_execnz .LBB23_1 ; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end @@ -6510,16 +6522,16 @@ define void @local_atomic_fmin_noret_v2f16__offset(ptr addrspace(3) %ptr, <2 x h ; GFX8-NEXT: .LBB23_1: ; %atomicrmw.start ; GFX8-NEXT: ; 
=>This Inner Loop Header: Depth=1 ; GFX8-NEXT: s_waitcnt lgkmcnt(0) -; GFX8-NEXT: v_max_f16_sdwa v4, v3, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1 -; GFX8-NEXT: v_max_f16_e32 v5, v3, v3 -; GFX8-NEXT: v_min_f16_sdwa v4, v4, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD +; GFX8-NEXT: v_mov_b32_e32 v4, v3 +; GFX8-NEXT: v_max_f16_sdwa v3, v4, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1 +; GFX8-NEXT: v_max_f16_e32 v5, v4, v4 +; GFX8-NEXT: v_min_f16_sdwa v3, v3, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; GFX8-NEXT: v_min_f16_e32 v5, v5, v1 -; GFX8-NEXT: v_or_b32_e32 v4, v5, v4 -; GFX8-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4 offset:65532 +; GFX8-NEXT: v_or_b32_e32 v3, v5, v3 +; GFX8-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3 offset:65532 ; GFX8-NEXT: s_waitcnt lgkmcnt(0) -; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3 +; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4 ; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5] -; GFX8-NEXT: v_mov_b32_e32 v3, v4 ; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX8-NEXT: s_cbranch_execnz .LBB23_1 ; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end @@ -7589,31 +7601,34 @@ define void @local_atomic_fmin_noret_v2bf16(ptr addrspace(3) %ptr, <2 x bfloat> ; GFX12-TRUE16-NEXT: .LBB26_1: ; %atomicrmw.start ; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0 -; GFX12-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v3 +; GFX12-TRUE16-NEXT: v_mov_b32_e32 v4, v3 ; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX12-TRUE16-NEXT: v_dual_min_num_f32 v5, v5, v2 :: v_dual_lshlrev_b32 v4, 16, v3 -; GFX12-TRUE16-NEXT: v_min_num_f32_e32 v4, v4, v1 +; GFX12-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v4 +; GFX12-TRUE16-NEXT: v_min_num_f32_e32 v5, v5, v2 +; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v4 ; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) ; GFX12-TRUE16-NEXT: v_bfe_u32 v7, v5, 16, 1 -; GFX12-TRUE16-NEXT: v_bfe_u32 v6, v4, 16, 1 -; GFX12-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v4 -; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4 +; GFX12-TRUE16-NEXT: v_min_num_f32_e32 v3, v3, v1 ; GFX12-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v5 +; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) ; GFX12-TRUE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff -; GFX12-TRUE16-NEXT: v_add3_u32 v6, v6, v4, 0x7fff +; GFX12-TRUE16-NEXT: v_bfe_u32 v6, v3, 16, 1 +; GFX12-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v3 +; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3 +; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1) +; GFX12-TRUE16-NEXT: v_add3_u32 v6, v6, v3, 0x7fff ; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd -; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_3) -; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v4, v6, v8, vcc_lo +; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v3, v6, v8, vcc_lo ; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5 ; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd ; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo -; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.l, v4.h +; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) +; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.l, v3.h ; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0 -; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v5, v3 +; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v5, v4 ; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0 ; GFX12-TRUE16-NEXT: global_inv 
scope:SCOPE_SE -; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3 -; GFX12-TRUE16-NEXT: v_mov_b32_e32 v3, v4 +; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4 ; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe ; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0 ; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe @@ -7638,32 +7653,33 @@ define void @local_atomic_fmin_noret_v2bf16(ptr addrspace(3) %ptr, <2 x bfloat> ; GFX12-FAKE16-NEXT: .LBB26_1: ; %atomicrmw.start ; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0 -; GFX12-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v3 +; GFX12-FAKE16-NEXT: v_mov_b32_e32 v4, v3 ; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX12-FAKE16-NEXT: v_dual_min_num_f32 v5, v5, v1 :: v_dual_lshlrev_b32 v4, 16, v3 -; GFX12-FAKE16-NEXT: v_min_num_f32_e32 v4, v4, v2 +; GFX12-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v4 +; GFX12-FAKE16-NEXT: v_min_num_f32_e32 v5, v5, v1 +; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v4 ; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) ; GFX12-FAKE16-NEXT: v_bfe_u32 v7, v5, 16, 1 -; GFX12-FAKE16-NEXT: v_bfe_u32 v6, v4, 16, 1 -; GFX12-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v4 +; GFX12-FAKE16-NEXT: v_min_num_f32_e32 v3, v3, v2 ; GFX12-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v5 ; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5 +; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4) ; GFX12-FAKE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff -; GFX12-FAKE16-NEXT: v_add3_u32 v6, v6, v4, 0x7fff -; GFX12-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v4, v4 +; GFX12-FAKE16-NEXT: v_bfe_u32 v6, v3, 16, 1 +; GFX12-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v3 +; GFX12-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v3, v3 ; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd -; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2) ; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo +; GFX12-FAKE16-NEXT: v_add3_u32 v6, v6, v3, 0x7fff ; GFX12-FAKE16-NEXT: s_wait_alu 0xf1ff -; GFX12-FAKE16-NEXT: v_cndmask_b32_e64 v4, v6, v8, s0 -; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX12-FAKE16-NEXT: v_perm_b32 v4, v5, v4, 0x7060302 +; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX12-FAKE16-NEXT: v_cndmask_b32_e64 v3, v6, v8, s0 +; GFX12-FAKE16-NEXT: v_perm_b32 v3, v5, v3, 0x7060302 ; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0 -; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3 +; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4 ; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0 ; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SE -; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3 -; GFX12-FAKE16-NEXT: v_mov_b32_e32 v3, v4 +; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4 ; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe ; GFX12-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1 ; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe @@ -7686,27 +7702,27 @@ define void @local_atomic_fmin_noret_v2bf16(ptr addrspace(3) %ptr, <2 x bfloat> ; GFX942-NEXT: .LBB26_1: ; %atomicrmw.start ; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX942-NEXT: s_waitcnt lgkmcnt(0) -; GFX942-NEXT: v_lshlrev_b32_e32 v4, 16, v3 -; GFX942-NEXT: v_and_b32_e32 v5, 0xffff0000, v3 -; GFX942-NEXT: v_min_f32_e32 v4, v4, v2 +; GFX942-NEXT: v_mov_b32_e32 v4, v3 +; GFX942-NEXT: v_lshlrev_b32_e32 v3, 16, v4 +; GFX942-NEXT: v_and_b32_e32 v5, 0xffff0000, v4 +; GFX942-NEXT: v_min_f32_e32 v3, v3, v2 ; GFX942-NEXT: v_min_f32_e32 v5, v5, v1 -; 
GFX942-NEXT: v_bfe_u32 v6, v4, 16, 1 +; GFX942-NEXT: v_bfe_u32 v6, v3, 16, 1 ; GFX942-NEXT: v_bfe_u32 v8, v5, 16, 1 -; GFX942-NEXT: v_or_b32_e32 v7, 0x400000, v4 +; GFX942-NEXT: v_or_b32_e32 v7, 0x400000, v3 ; GFX942-NEXT: v_or_b32_e32 v9, 0x400000, v5 -; GFX942-NEXT: v_add3_u32 v6, v6, v4, s4 +; GFX942-NEXT: v_add3_u32 v6, v6, v3, s4 ; GFX942-NEXT: v_add3_u32 v8, v8, v5, s4 ; GFX942-NEXT: v_cmp_u_f32_e32 vcc, v5, v5 -; GFX942-NEXT: v_cmp_u_f32_e64 s[0:1], v4, v4 +; GFX942-NEXT: v_cmp_u_f32_e64 s[0:1], v3, v3 ; GFX942-NEXT: s_nop 0 ; GFX942-NEXT: v_cndmask_b32_e32 v5, v8, v9, vcc -; GFX942-NEXT: v_cndmask_b32_e64 v4, v6, v7, s[0:1] -; GFX942-NEXT: v_perm_b32 v4, v5, v4, s5 -; GFX942-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4 +; GFX942-NEXT: v_cndmask_b32_e64 v3, v6, v7, s[0:1] +; GFX942-NEXT: v_perm_b32 v3, v5, v3, s5 +; GFX942-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3 ; GFX942-NEXT: s_waitcnt lgkmcnt(0) -; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3 +; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4 ; GFX942-NEXT: s_or_b64 s[2:3], vcc, s[2:3] -; GFX942-NEXT: v_mov_b32_e32 v3, v4 ; GFX942-NEXT: s_andn2_b64 exec, exec, s[2:3] ; GFX942-NEXT: s_cbranch_execnz .LBB26_1 ; GFX942-NEXT: ; %bb.2: ; %atomicrmw.end @@ -7724,30 +7740,32 @@ define void @local_atomic_fmin_noret_v2bf16(ptr addrspace(3) %ptr, <2 x bfloat> ; GFX11-TRUE16-NEXT: .LBB26_1: ; %atomicrmw.start ; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0) -; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v3 +; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v3 ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-TRUE16-NEXT: v_dual_min_f32 v5, v5, v2 :: v_dual_lshlrev_b32 v4, 16, v3 -; GFX11-TRUE16-NEXT: v_min_f32_e32 v4, v4, v1 +; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v4 +; GFX11-TRUE16-NEXT: v_min_f32_e32 v5, v5, v2 +; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v4 ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) ; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v5, 16, 1 -; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v4, 16, 1 -; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v4 -; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4 +; GFX11-TRUE16-NEXT: v_min_f32_e32 v3, v3, v1 ; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v5 +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) ; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff -; GFX11-TRUE16-NEXT: v_add3_u32 v6, v6, v4, 0x7fff -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_4) -; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v4, v6, v8, vcc_lo +; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v3, 16, 1 +; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v3 +; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3 +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX11-TRUE16-NEXT: v_add3_u32 v6, v6, v3, 0x7fff +; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v6, v8, vcc_lo ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5 ; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v4.h +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v3.h ; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0 -; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v5, v3 +; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v5, v4 ; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0) ; GFX11-TRUE16-NEXT: buffer_gl0_inv -; GFX11-TRUE16-NEXT: 
v_cmp_eq_u32_e32 vcc_lo, v4, v3 -; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v4 +; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4 ; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0 ; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0 @@ -7768,30 +7786,32 @@ define void @local_atomic_fmin_noret_v2bf16(ptr addrspace(3) %ptr, <2 x bfloat> ; GFX11-FAKE16-NEXT: .LBB26_1: ; %atomicrmw.start ; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0) -; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v3 +; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v3 ; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-FAKE16-NEXT: v_dual_min_f32 v5, v5, v1 :: v_dual_lshlrev_b32 v4, 16, v3 -; GFX11-FAKE16-NEXT: v_min_f32_e32 v4, v4, v2 +; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v4 +; GFX11-FAKE16-NEXT: v_min_f32_e32 v5, v5, v1 +; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v4 ; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) ; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v5, 16, 1 -; GFX11-FAKE16-NEXT: v_bfe_u32 v6, v4, 16, 1 -; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v4 +; GFX11-FAKE16-NEXT: v_min_f32_e32 v3, v3, v2 ; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v5 ; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5 +; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4) ; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff -; GFX11-FAKE16-NEXT: v_add3_u32 v6, v6, v4, 0x7fff -; GFX11-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v4, v4 -; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2) +; GFX11-FAKE16-NEXT: v_bfe_u32 v6, v3, 16, 1 +; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v3 +; GFX11-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v3, v3 +; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4) ; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo -; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v4, v6, v8, s0 -; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX11-FAKE16-NEXT: v_perm_b32 v4, v5, v4, 0x7060302 +; GFX11-FAKE16-NEXT: v_add3_u32 v6, v6, v3, 0x7fff +; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v3, v6, v8, s0 +; GFX11-FAKE16-NEXT: v_perm_b32 v3, v5, v3, 0x7060302 ; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0 -; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3 +; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4 ; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0) ; GFX11-FAKE16-NEXT: buffer_gl0_inv -; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3 -; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v4 +; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4 ; GFX11-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1 ; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1 @@ -7811,27 +7831,27 @@ define void @local_atomic_fmin_noret_v2bf16(ptr addrspace(3) %ptr, <2 x bfloat> ; GFX10-NEXT: .LBB26_1: ; %atomicrmw.start ; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX10-NEXT: s_waitcnt lgkmcnt(0) -; GFX10-NEXT: v_lshlrev_b32_e32 v4, 16, v3 -; GFX10-NEXT: v_and_b32_e32 v5, 0xffff0000, v3 -; GFX10-NEXT: v_min_f32_e32 v4, v4, v2 +; GFX10-NEXT: v_mov_b32_e32 v4, v3 +; GFX10-NEXT: v_lshlrev_b32_e32 v3, 16, v4 +; GFX10-NEXT: v_and_b32_e32 v5, 0xffff0000, v4 +; GFX10-NEXT: v_min_f32_e32 v3, v3, v2 ; GFX10-NEXT: 
v_min_f32_e32 v5, v5, v1 -; GFX10-NEXT: v_bfe_u32 v6, v4, 16, 1 +; GFX10-NEXT: v_bfe_u32 v6, v3, 16, 1 ; GFX10-NEXT: v_bfe_u32 v7, v5, 16, 1 -; GFX10-NEXT: v_or_b32_e32 v8, 0x400000, v4 +; GFX10-NEXT: v_or_b32_e32 v8, 0x400000, v3 ; GFX10-NEXT: v_or_b32_e32 v9, 0x400000, v5 ; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5 -; GFX10-NEXT: v_add3_u32 v6, v6, v4, 0x7fff +; GFX10-NEXT: v_add3_u32 v6, v6, v3, 0x7fff ; GFX10-NEXT: v_add3_u32 v7, v7, v5, 0x7fff -; GFX10-NEXT: v_cmp_u_f32_e64 s4, v4, v4 +; GFX10-NEXT: v_cmp_u_f32_e64 s4, v3, v3 ; GFX10-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo -; GFX10-NEXT: v_cndmask_b32_e64 v4, v6, v8, s4 -; GFX10-NEXT: v_perm_b32 v4, v5, v4, 0x7060302 +; GFX10-NEXT: v_cndmask_b32_e64 v3, v6, v8, s4 +; GFX10-NEXT: v_perm_b32 v3, v5, v3, 0x7060302 ; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 -; GFX10-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4 +; GFX10-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3 ; GFX10-NEXT: s_waitcnt lgkmcnt(0) ; GFX10-NEXT: buffer_gl0_inv -; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3 -; GFX10-NEXT: v_mov_b32_e32 v3, v4 +; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4 ; GFX10-NEXT: s_or_b32 s5, vcc_lo, s5 ; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s5 ; GFX10-NEXT: s_cbranch_execnz .LBB26_1 @@ -7851,26 +7871,26 @@ define void @local_atomic_fmin_noret_v2bf16(ptr addrspace(3) %ptr, <2 x bfloat> ; GFX90A-NEXT: .LBB26_1: ; %atomicrmw.start ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX90A-NEXT: s_waitcnt lgkmcnt(0) -; GFX90A-NEXT: v_lshlrev_b32_e32 v4, 16, v3 -; GFX90A-NEXT: v_and_b32_e32 v5, 0xffff0000, v3 -; GFX90A-NEXT: v_min_f32_e32 v4, v4, v2 +; GFX90A-NEXT: v_mov_b32_e32 v4, v3 +; GFX90A-NEXT: v_lshlrev_b32_e32 v3, 16, v4 +; GFX90A-NEXT: v_and_b32_e32 v5, 0xffff0000, v4 +; GFX90A-NEXT: v_min_f32_e32 v3, v3, v2 ; GFX90A-NEXT: v_min_f32_e32 v5, v5, v1 -; GFX90A-NEXT: v_bfe_u32 v6, v4, 16, 1 +; GFX90A-NEXT: v_bfe_u32 v6, v3, 16, 1 ; GFX90A-NEXT: v_bfe_u32 v8, v5, 16, 1 -; GFX90A-NEXT: v_or_b32_e32 v7, 0x400000, v4 +; GFX90A-NEXT: v_or_b32_e32 v7, 0x400000, v3 ; GFX90A-NEXT: v_or_b32_e32 v9, 0x400000, v5 -; GFX90A-NEXT: v_add3_u32 v6, v6, v4, s8 +; GFX90A-NEXT: v_add3_u32 v6, v6, v3, s8 ; GFX90A-NEXT: v_add3_u32 v8, v8, v5, s8 ; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v5, v5 -; GFX90A-NEXT: v_cmp_u_f32_e64 s[4:5], v4, v4 -; GFX90A-NEXT: v_cndmask_b32_e64 v4, v6, v7, s[4:5] +; GFX90A-NEXT: v_cmp_u_f32_e64 s[4:5], v3, v3 +; GFX90A-NEXT: v_cndmask_b32_e64 v3, v6, v7, s[4:5] ; GFX90A-NEXT: v_cndmask_b32_e32 v5, v8, v9, vcc -; GFX90A-NEXT: v_perm_b32 v4, v5, v4, s9 -; GFX90A-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4 +; GFX90A-NEXT: v_perm_b32 v3, v5, v3, s9 +; GFX90A-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3 ; GFX90A-NEXT: s_waitcnt lgkmcnt(0) -; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3 +; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4 ; GFX90A-NEXT: s_or_b64 s[6:7], vcc, s[6:7] -; GFX90A-NEXT: v_mov_b32_e32 v3, v4 ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[6:7] ; GFX90A-NEXT: s_cbranch_execnz .LBB26_1 ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end @@ -7889,26 +7909,26 @@ define void @local_atomic_fmin_noret_v2bf16(ptr addrspace(3) %ptr, <2 x bfloat> ; GFX908-NEXT: .LBB26_1: ; %atomicrmw.start ; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX908-NEXT: s_waitcnt lgkmcnt(0) -; GFX908-NEXT: v_lshlrev_b32_e32 v4, 16, v3 -; GFX908-NEXT: v_and_b32_e32 v5, 0xffff0000, v3 -; GFX908-NEXT: v_min_f32_e32 v4, v4, v2 +; GFX908-NEXT: v_mov_b32_e32 v4, v3 +; GFX908-NEXT: v_lshlrev_b32_e32 v3, 16, v4 +; GFX908-NEXT: v_and_b32_e32 v5, 0xffff0000, v4 +; GFX908-NEXT: v_min_f32_e32 v3, v3, v2 ; 
GFX908-NEXT: v_min_f32_e32 v5, v5, v1 -; GFX908-NEXT: v_bfe_u32 v6, v4, 16, 1 +; GFX908-NEXT: v_bfe_u32 v6, v3, 16, 1 ; GFX908-NEXT: v_bfe_u32 v8, v5, 16, 1 -; GFX908-NEXT: v_or_b32_e32 v7, 0x400000, v4 +; GFX908-NEXT: v_or_b32_e32 v7, 0x400000, v3 ; GFX908-NEXT: v_or_b32_e32 v9, 0x400000, v5 -; GFX908-NEXT: v_add3_u32 v6, v6, v4, s8 +; GFX908-NEXT: v_add3_u32 v6, v6, v3, s8 ; GFX908-NEXT: v_add3_u32 v8, v8, v5, s8 ; GFX908-NEXT: v_cmp_u_f32_e32 vcc, v5, v5 -; GFX908-NEXT: v_cmp_u_f32_e64 s[4:5], v4, v4 -; GFX908-NEXT: v_cndmask_b32_e64 v4, v6, v7, s[4:5] +; GFX908-NEXT: v_cmp_u_f32_e64 s[4:5], v3, v3 +; GFX908-NEXT: v_cndmask_b32_e64 v3, v6, v7, s[4:5] ; GFX908-NEXT: v_cndmask_b32_e32 v5, v8, v9, vcc -; GFX908-NEXT: v_perm_b32 v4, v5, v4, s9 -; GFX908-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4 +; GFX908-NEXT: v_perm_b32 v3, v5, v3, s9 +; GFX908-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3 ; GFX908-NEXT: s_waitcnt lgkmcnt(0) -; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3 +; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4 ; GFX908-NEXT: s_or_b64 s[6:7], vcc, s[6:7] -; GFX908-NEXT: v_mov_b32_e32 v3, v4 ; GFX908-NEXT: s_andn2_b64 exec, exec, s[6:7] ; GFX908-NEXT: s_cbranch_execnz .LBB26_1 ; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end @@ -7926,29 +7946,29 @@ define void @local_atomic_fmin_noret_v2bf16(ptr addrspace(3) %ptr, <2 x bfloat> ; GFX8-NEXT: .LBB26_1: ; %atomicrmw.start ; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX8-NEXT: s_waitcnt lgkmcnt(0) -; GFX8-NEXT: v_lshlrev_b32_e32 v4, 16, v3 -; GFX8-NEXT: v_and_b32_e32 v5, 0xffff0000, v3 -; GFX8-NEXT: v_min_f32_e32 v4, v4, v2 +; GFX8-NEXT: v_mov_b32_e32 v4, v3 +; GFX8-NEXT: v_lshlrev_b32_e32 v3, 16, v4 +; GFX8-NEXT: v_and_b32_e32 v5, 0xffff0000, v4 +; GFX8-NEXT: v_min_f32_e32 v3, v3, v2 ; GFX8-NEXT: v_min_f32_e32 v5, v5, v1 -; GFX8-NEXT: v_bfe_u32 v6, v4, 16, 1 +; GFX8-NEXT: v_bfe_u32 v6, v3, 16, 1 ; GFX8-NEXT: v_bfe_u32 v8, v5, 16, 1 -; GFX8-NEXT: v_add_u32_e32 v6, vcc, v6, v4 +; GFX8-NEXT: v_add_u32_e32 v6, vcc, v6, v3 ; GFX8-NEXT: v_add_u32_e32 v8, vcc, v8, v5 ; GFX8-NEXT: v_add_u32_e32 v6, vcc, 0x7fff, v6 ; GFX8-NEXT: v_add_u32_e32 v8, vcc, 0x7fff, v8 ; GFX8-NEXT: v_or_b32_e32 v9, 0x400000, v5 ; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v5, v5 -; GFX8-NEXT: v_or_b32_e32 v7, 0x400000, v4 -; GFX8-NEXT: v_cmp_u_f32_e64 s[4:5], v4, v4 +; GFX8-NEXT: v_or_b32_e32 v7, 0x400000, v3 +; GFX8-NEXT: v_cmp_u_f32_e64 s[4:5], v3, v3 ; GFX8-NEXT: v_cndmask_b32_e32 v5, v8, v9, vcc -; GFX8-NEXT: v_cndmask_b32_e64 v4, v6, v7, s[4:5] +; GFX8-NEXT: v_cndmask_b32_e64 v3, v6, v7, s[4:5] ; GFX8-NEXT: v_lshrrev_b32_e32 v5, 16, v5 -; GFX8-NEXT: v_alignbit_b32 v4, v5, v4, 16 -; GFX8-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4 +; GFX8-NEXT: v_alignbit_b32 v3, v5, v3, 16 +; GFX8-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3 ; GFX8-NEXT: s_waitcnt lgkmcnt(0) -; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3 +; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4 ; GFX8-NEXT: s_or_b64 s[6:7], vcc, s[6:7] -; GFX8-NEXT: v_mov_b32_e32 v3, v4 ; GFX8-NEXT: s_andn2_b64 exec, exec, s[6:7] ; GFX8-NEXT: s_cbranch_execnz .LBB26_1 ; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end @@ -8047,31 +8067,34 @@ define void @local_atomic_fmin_noret_v2bf16__ofset(ptr addrspace(3) %ptr, <2 x b ; GFX12-TRUE16-NEXT: .LBB27_1: ; %atomicrmw.start ; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0 -; GFX12-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v3 +; GFX12-TRUE16-NEXT: v_mov_b32_e32 v4, v3 ; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX12-TRUE16-NEXT: 
v_dual_min_num_f32 v5, v5, v2 :: v_dual_lshlrev_b32 v4, 16, v3 -; GFX12-TRUE16-NEXT: v_min_num_f32_e32 v4, v4, v1 +; GFX12-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v4 +; GFX12-TRUE16-NEXT: v_min_num_f32_e32 v5, v5, v2 +; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v4 ; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) ; GFX12-TRUE16-NEXT: v_bfe_u32 v7, v5, 16, 1 -; GFX12-TRUE16-NEXT: v_bfe_u32 v6, v4, 16, 1 -; GFX12-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v4 -; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4 +; GFX12-TRUE16-NEXT: v_min_num_f32_e32 v3, v3, v1 ; GFX12-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v5 +; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) ; GFX12-TRUE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff -; GFX12-TRUE16-NEXT: v_add3_u32 v6, v6, v4, 0x7fff +; GFX12-TRUE16-NEXT: v_bfe_u32 v6, v3, 16, 1 +; GFX12-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v3 +; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3 +; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1) +; GFX12-TRUE16-NEXT: v_add3_u32 v6, v6, v3, 0x7fff ; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd -; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_3) -; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v4, v6, v8, vcc_lo +; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v3, v6, v8, vcc_lo ; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5 ; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd ; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo -; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.l, v4.h +; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) +; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.l, v3.h ; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0 -; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v5, v3 offset:65532 +; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v5, v4 offset:65532 ; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0 ; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SE -; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3 -; GFX12-TRUE16-NEXT: v_mov_b32_e32 v3, v4 +; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4 ; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe ; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0 ; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe @@ -8096,32 +8119,33 @@ define void @local_atomic_fmin_noret_v2bf16__ofset(ptr addrspace(3) %ptr, <2 x b ; GFX12-FAKE16-NEXT: .LBB27_1: ; %atomicrmw.start ; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0 -; GFX12-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v3 +; GFX12-FAKE16-NEXT: v_mov_b32_e32 v4, v3 ; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX12-FAKE16-NEXT: v_dual_min_num_f32 v5, v5, v1 :: v_dual_lshlrev_b32 v4, 16, v3 -; GFX12-FAKE16-NEXT: v_min_num_f32_e32 v4, v4, v2 +; GFX12-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v4 +; GFX12-FAKE16-NEXT: v_min_num_f32_e32 v5, v5, v1 +; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v4 ; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) ; GFX12-FAKE16-NEXT: v_bfe_u32 v7, v5, 16, 1 -; GFX12-FAKE16-NEXT: v_bfe_u32 v6, v4, 16, 1 -; GFX12-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v4 +; GFX12-FAKE16-NEXT: v_min_num_f32_e32 v3, v3, v2 ; GFX12-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v5 ; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5 +; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4) ; GFX12-FAKE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff -; GFX12-FAKE16-NEXT: v_add3_u32 v6, v6, v4, 
0x7fff -; GFX12-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v4, v4 +; GFX12-FAKE16-NEXT: v_bfe_u32 v6, v3, 16, 1 +; GFX12-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v3 +; GFX12-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v3, v3 ; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd -; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2) ; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo +; GFX12-FAKE16-NEXT: v_add3_u32 v6, v6, v3, 0x7fff ; GFX12-FAKE16-NEXT: s_wait_alu 0xf1ff -; GFX12-FAKE16-NEXT: v_cndmask_b32_e64 v4, v6, v8, s0 -; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX12-FAKE16-NEXT: v_perm_b32 v4, v5, v4, 0x7060302 +; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX12-FAKE16-NEXT: v_cndmask_b32_e64 v3, v6, v8, s0 +; GFX12-FAKE16-NEXT: v_perm_b32 v3, v5, v3, 0x7060302 ; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0 -; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3 offset:65532 +; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4 offset:65532 ; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0 ; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SE -; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3 -; GFX12-FAKE16-NEXT: v_mov_b32_e32 v3, v4 +; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4 ; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe ; GFX12-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1 ; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe @@ -8144,27 +8168,27 @@ define void @local_atomic_fmin_noret_v2bf16__ofset(ptr addrspace(3) %ptr, <2 x b ; GFX942-NEXT: .LBB27_1: ; %atomicrmw.start ; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX942-NEXT: s_waitcnt lgkmcnt(0) -; GFX942-NEXT: v_lshlrev_b32_e32 v4, 16, v3 -; GFX942-NEXT: v_and_b32_e32 v5, 0xffff0000, v3 -; GFX942-NEXT: v_min_f32_e32 v4, v4, v2 +; GFX942-NEXT: v_mov_b32_e32 v4, v3 +; GFX942-NEXT: v_lshlrev_b32_e32 v3, 16, v4 +; GFX942-NEXT: v_and_b32_e32 v5, 0xffff0000, v4 +; GFX942-NEXT: v_min_f32_e32 v3, v3, v2 ; GFX942-NEXT: v_min_f32_e32 v5, v5, v1 -; GFX942-NEXT: v_bfe_u32 v6, v4, 16, 1 +; GFX942-NEXT: v_bfe_u32 v6, v3, 16, 1 ; GFX942-NEXT: v_bfe_u32 v8, v5, 16, 1 -; GFX942-NEXT: v_or_b32_e32 v7, 0x400000, v4 +; GFX942-NEXT: v_or_b32_e32 v7, 0x400000, v3 ; GFX942-NEXT: v_or_b32_e32 v9, 0x400000, v5 -; GFX942-NEXT: v_add3_u32 v6, v6, v4, s4 +; GFX942-NEXT: v_add3_u32 v6, v6, v3, s4 ; GFX942-NEXT: v_add3_u32 v8, v8, v5, s4 ; GFX942-NEXT: v_cmp_u_f32_e32 vcc, v5, v5 -; GFX942-NEXT: v_cmp_u_f32_e64 s[0:1], v4, v4 +; GFX942-NEXT: v_cmp_u_f32_e64 s[0:1], v3, v3 ; GFX942-NEXT: s_nop 0 ; GFX942-NEXT: v_cndmask_b32_e32 v5, v8, v9, vcc -; GFX942-NEXT: v_cndmask_b32_e64 v4, v6, v7, s[0:1] -; GFX942-NEXT: v_perm_b32 v4, v5, v4, s5 -; GFX942-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4 offset:65532 +; GFX942-NEXT: v_cndmask_b32_e64 v3, v6, v7, s[0:1] +; GFX942-NEXT: v_perm_b32 v3, v5, v3, s5 +; GFX942-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3 offset:65532 ; GFX942-NEXT: s_waitcnt lgkmcnt(0) -; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3 +; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4 ; GFX942-NEXT: s_or_b64 s[2:3], vcc, s[2:3] -; GFX942-NEXT: v_mov_b32_e32 v3, v4 ; GFX942-NEXT: s_andn2_b64 exec, exec, s[2:3] ; GFX942-NEXT: s_cbranch_execnz .LBB27_1 ; GFX942-NEXT: ; %bb.2: ; %atomicrmw.end @@ -8182,30 +8206,32 @@ define void @local_atomic_fmin_noret_v2bf16__ofset(ptr addrspace(3) %ptr, <2 x b ; GFX11-TRUE16-NEXT: .LBB27_1: ; %atomicrmw.start ; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0) -; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v3 +; GFX11-TRUE16-NEXT: 
v_mov_b32_e32 v4, v3 ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-TRUE16-NEXT: v_dual_min_f32 v5, v5, v2 :: v_dual_lshlrev_b32 v4, 16, v3 -; GFX11-TRUE16-NEXT: v_min_f32_e32 v4, v4, v1 +; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v4 +; GFX11-TRUE16-NEXT: v_min_f32_e32 v5, v5, v2 +; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v4 ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) ; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v5, 16, 1 -; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v4, 16, 1 -; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v4 -; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4 +; GFX11-TRUE16-NEXT: v_min_f32_e32 v3, v3, v1 ; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v5 +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) ; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff -; GFX11-TRUE16-NEXT: v_add3_u32 v6, v6, v4, 0x7fff -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_4) -; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v4, v6, v8, vcc_lo +; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v3, 16, 1 +; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v3 +; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3 +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX11-TRUE16-NEXT: v_add3_u32 v6, v6, v3, 0x7fff +; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v6, v8, vcc_lo ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5 ; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v4.h +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v3.h ; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0 -; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v5, v3 offset:65532 +; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v5, v4 offset:65532 ; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0) ; GFX11-TRUE16-NEXT: buffer_gl0_inv -; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3 -; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v4 +; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4 ; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0 ; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0 @@ -8226,30 +8252,32 @@ define void @local_atomic_fmin_noret_v2bf16__ofset(ptr addrspace(3) %ptr, <2 x b ; GFX11-FAKE16-NEXT: .LBB27_1: ; %atomicrmw.start ; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0) -; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v3 +; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v3 ; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-FAKE16-NEXT: v_dual_min_f32 v5, v5, v1 :: v_dual_lshlrev_b32 v4, 16, v3 -; GFX11-FAKE16-NEXT: v_min_f32_e32 v4, v4, v2 +; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v4 +; GFX11-FAKE16-NEXT: v_min_f32_e32 v5, v5, v1 +; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v4 ; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) ; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v5, 16, 1 -; GFX11-FAKE16-NEXT: v_bfe_u32 v6, v4, 16, 1 -; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v4 +; GFX11-FAKE16-NEXT: v_min_f32_e32 v3, v3, v2 ; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v5 ; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5 +; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4) ; GFX11-FAKE16-NEXT: v_add3_u32 v7, 
v7, v5, 0x7fff -; GFX11-FAKE16-NEXT: v_add3_u32 v6, v6, v4, 0x7fff -; GFX11-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v4, v4 -; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2) +; GFX11-FAKE16-NEXT: v_bfe_u32 v6, v3, 16, 1 +; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v3 +; GFX11-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v3, v3 +; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4) ; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo -; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v4, v6, v8, s0 -; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX11-FAKE16-NEXT: v_perm_b32 v4, v5, v4, 0x7060302 +; GFX11-FAKE16-NEXT: v_add3_u32 v6, v6, v3, 0x7fff +; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v3, v6, v8, s0 +; GFX11-FAKE16-NEXT: v_perm_b32 v3, v5, v3, 0x7060302 ; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0 -; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3 offset:65532 +; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4 offset:65532 ; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0) ; GFX11-FAKE16-NEXT: buffer_gl0_inv -; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3 -; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v4 +; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4 ; GFX11-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1 ; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1 @@ -8269,27 +8297,27 @@ define void @local_atomic_fmin_noret_v2bf16__ofset(ptr addrspace(3) %ptr, <2 x b ; GFX10-NEXT: .LBB27_1: ; %atomicrmw.start ; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX10-NEXT: s_waitcnt lgkmcnt(0) -; GFX10-NEXT: v_lshlrev_b32_e32 v4, 16, v3 -; GFX10-NEXT: v_and_b32_e32 v5, 0xffff0000, v3 -; GFX10-NEXT: v_min_f32_e32 v4, v4, v2 +; GFX10-NEXT: v_mov_b32_e32 v4, v3 +; GFX10-NEXT: v_lshlrev_b32_e32 v3, 16, v4 +; GFX10-NEXT: v_and_b32_e32 v5, 0xffff0000, v4 +; GFX10-NEXT: v_min_f32_e32 v3, v3, v2 ; GFX10-NEXT: v_min_f32_e32 v5, v5, v1 -; GFX10-NEXT: v_bfe_u32 v6, v4, 16, 1 +; GFX10-NEXT: v_bfe_u32 v6, v3, 16, 1 ; GFX10-NEXT: v_bfe_u32 v7, v5, 16, 1 -; GFX10-NEXT: v_or_b32_e32 v8, 0x400000, v4 +; GFX10-NEXT: v_or_b32_e32 v8, 0x400000, v3 ; GFX10-NEXT: v_or_b32_e32 v9, 0x400000, v5 ; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5 -; GFX10-NEXT: v_add3_u32 v6, v6, v4, 0x7fff +; GFX10-NEXT: v_add3_u32 v6, v6, v3, 0x7fff ; GFX10-NEXT: v_add3_u32 v7, v7, v5, 0x7fff -; GFX10-NEXT: v_cmp_u_f32_e64 s4, v4, v4 +; GFX10-NEXT: v_cmp_u_f32_e64 s4, v3, v3 ; GFX10-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo -; GFX10-NEXT: v_cndmask_b32_e64 v4, v6, v8, s4 -; GFX10-NEXT: v_perm_b32 v4, v5, v4, 0x7060302 +; GFX10-NEXT: v_cndmask_b32_e64 v3, v6, v8, s4 +; GFX10-NEXT: v_perm_b32 v3, v5, v3, 0x7060302 ; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 -; GFX10-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4 offset:65532 +; GFX10-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3 offset:65532 ; GFX10-NEXT: s_waitcnt lgkmcnt(0) ; GFX10-NEXT: buffer_gl0_inv -; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3 -; GFX10-NEXT: v_mov_b32_e32 v3, v4 +; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4 ; GFX10-NEXT: s_or_b32 s5, vcc_lo, s5 ; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s5 ; GFX10-NEXT: s_cbranch_execnz .LBB27_1 @@ -8309,26 +8337,26 @@ define void @local_atomic_fmin_noret_v2bf16__ofset(ptr addrspace(3) %ptr, <2 x b ; GFX90A-NEXT: .LBB27_1: ; %atomicrmw.start ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX90A-NEXT: s_waitcnt lgkmcnt(0) -; 
GFX90A-NEXT: v_lshlrev_b32_e32 v4, 16, v3 -; GFX90A-NEXT: v_and_b32_e32 v5, 0xffff0000, v3 -; GFX90A-NEXT: v_min_f32_e32 v4, v4, v2 +; GFX90A-NEXT: v_mov_b32_e32 v4, v3 +; GFX90A-NEXT: v_lshlrev_b32_e32 v3, 16, v4 +; GFX90A-NEXT: v_and_b32_e32 v5, 0xffff0000, v4 +; GFX90A-NEXT: v_min_f32_e32 v3, v3, v2 ; GFX90A-NEXT: v_min_f32_e32 v5, v5, v1 -; GFX90A-NEXT: v_bfe_u32 v6, v4, 16, 1 +; GFX90A-NEXT: v_bfe_u32 v6, v3, 16, 1 ; GFX90A-NEXT: v_bfe_u32 v8, v5, 16, 1 -; GFX90A-NEXT: v_or_b32_e32 v7, 0x400000, v4 +; GFX90A-NEXT: v_or_b32_e32 v7, 0x400000, v3 ; GFX90A-NEXT: v_or_b32_e32 v9, 0x400000, v5 -; GFX90A-NEXT: v_add3_u32 v6, v6, v4, s8 +; GFX90A-NEXT: v_add3_u32 v6, v6, v3, s8 ; GFX90A-NEXT: v_add3_u32 v8, v8, v5, s8 ; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v5, v5 -; GFX90A-NEXT: v_cmp_u_f32_e64 s[4:5], v4, v4 -; GFX90A-NEXT: v_cndmask_b32_e64 v4, v6, v7, s[4:5] +; GFX90A-NEXT: v_cmp_u_f32_e64 s[4:5], v3, v3 +; GFX90A-NEXT: v_cndmask_b32_e64 v3, v6, v7, s[4:5] ; GFX90A-NEXT: v_cndmask_b32_e32 v5, v8, v9, vcc -; GFX90A-NEXT: v_perm_b32 v4, v5, v4, s9 -; GFX90A-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4 offset:65532 +; GFX90A-NEXT: v_perm_b32 v3, v5, v3, s9 +; GFX90A-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3 offset:65532 ; GFX90A-NEXT: s_waitcnt lgkmcnt(0) -; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3 +; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4 ; GFX90A-NEXT: s_or_b64 s[6:7], vcc, s[6:7] -; GFX90A-NEXT: v_mov_b32_e32 v3, v4 ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[6:7] ; GFX90A-NEXT: s_cbranch_execnz .LBB27_1 ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end @@ -8347,26 +8375,26 @@ define void @local_atomic_fmin_noret_v2bf16__ofset(ptr addrspace(3) %ptr, <2 x b ; GFX908-NEXT: .LBB27_1: ; %atomicrmw.start ; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX908-NEXT: s_waitcnt lgkmcnt(0) -; GFX908-NEXT: v_lshlrev_b32_e32 v4, 16, v3 -; GFX908-NEXT: v_and_b32_e32 v5, 0xffff0000, v3 -; GFX908-NEXT: v_min_f32_e32 v4, v4, v2 +; GFX908-NEXT: v_mov_b32_e32 v4, v3 +; GFX908-NEXT: v_lshlrev_b32_e32 v3, 16, v4 +; GFX908-NEXT: v_and_b32_e32 v5, 0xffff0000, v4 +; GFX908-NEXT: v_min_f32_e32 v3, v3, v2 ; GFX908-NEXT: v_min_f32_e32 v5, v5, v1 -; GFX908-NEXT: v_bfe_u32 v6, v4, 16, 1 +; GFX908-NEXT: v_bfe_u32 v6, v3, 16, 1 ; GFX908-NEXT: v_bfe_u32 v8, v5, 16, 1 -; GFX908-NEXT: v_or_b32_e32 v7, 0x400000, v4 +; GFX908-NEXT: v_or_b32_e32 v7, 0x400000, v3 ; GFX908-NEXT: v_or_b32_e32 v9, 0x400000, v5 -; GFX908-NEXT: v_add3_u32 v6, v6, v4, s8 +; GFX908-NEXT: v_add3_u32 v6, v6, v3, s8 ; GFX908-NEXT: v_add3_u32 v8, v8, v5, s8 ; GFX908-NEXT: v_cmp_u_f32_e32 vcc, v5, v5 -; GFX908-NEXT: v_cmp_u_f32_e64 s[4:5], v4, v4 -; GFX908-NEXT: v_cndmask_b32_e64 v4, v6, v7, s[4:5] +; GFX908-NEXT: v_cmp_u_f32_e64 s[4:5], v3, v3 +; GFX908-NEXT: v_cndmask_b32_e64 v3, v6, v7, s[4:5] ; GFX908-NEXT: v_cndmask_b32_e32 v5, v8, v9, vcc -; GFX908-NEXT: v_perm_b32 v4, v5, v4, s9 -; GFX908-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4 offset:65532 +; GFX908-NEXT: v_perm_b32 v3, v5, v3, s9 +; GFX908-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3 offset:65532 ; GFX908-NEXT: s_waitcnt lgkmcnt(0) -; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3 +; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4 ; GFX908-NEXT: s_or_b64 s[6:7], vcc, s[6:7] -; GFX908-NEXT: v_mov_b32_e32 v3, v4 ; GFX908-NEXT: s_andn2_b64 exec, exec, s[6:7] ; GFX908-NEXT: s_cbranch_execnz .LBB27_1 ; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end @@ -8384,29 +8412,29 @@ define void @local_atomic_fmin_noret_v2bf16__ofset(ptr addrspace(3) %ptr, <2 x b ; GFX8-NEXT: .LBB27_1: ; %atomicrmw.start ; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1 ; 
GFX8-NEXT: s_waitcnt lgkmcnt(0) -; GFX8-NEXT: v_lshlrev_b32_e32 v4, 16, v3 -; GFX8-NEXT: v_and_b32_e32 v5, 0xffff0000, v3 -; GFX8-NEXT: v_min_f32_e32 v4, v4, v2 +; GFX8-NEXT: v_mov_b32_e32 v4, v3 +; GFX8-NEXT: v_lshlrev_b32_e32 v3, 16, v4 +; GFX8-NEXT: v_and_b32_e32 v5, 0xffff0000, v4 +; GFX8-NEXT: v_min_f32_e32 v3, v3, v2 ; GFX8-NEXT: v_min_f32_e32 v5, v5, v1 -; GFX8-NEXT: v_bfe_u32 v6, v4, 16, 1 +; GFX8-NEXT: v_bfe_u32 v6, v3, 16, 1 ; GFX8-NEXT: v_bfe_u32 v8, v5, 16, 1 -; GFX8-NEXT: v_add_u32_e32 v6, vcc, v6, v4 +; GFX8-NEXT: v_add_u32_e32 v6, vcc, v6, v3 ; GFX8-NEXT: v_add_u32_e32 v8, vcc, v8, v5 ; GFX8-NEXT: v_add_u32_e32 v6, vcc, 0x7fff, v6 ; GFX8-NEXT: v_add_u32_e32 v8, vcc, 0x7fff, v8 ; GFX8-NEXT: v_or_b32_e32 v9, 0x400000, v5 ; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v5, v5 -; GFX8-NEXT: v_or_b32_e32 v7, 0x400000, v4 -; GFX8-NEXT: v_cmp_u_f32_e64 s[4:5], v4, v4 +; GFX8-NEXT: v_or_b32_e32 v7, 0x400000, v3 +; GFX8-NEXT: v_cmp_u_f32_e64 s[4:5], v3, v3 ; GFX8-NEXT: v_cndmask_b32_e32 v5, v8, v9, vcc -; GFX8-NEXT: v_cndmask_b32_e64 v4, v6, v7, s[4:5] +; GFX8-NEXT: v_cndmask_b32_e64 v3, v6, v7, s[4:5] ; GFX8-NEXT: v_lshrrev_b32_e32 v5, 16, v5 -; GFX8-NEXT: v_alignbit_b32 v4, v5, v4, 16 -; GFX8-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4 offset:65532 +; GFX8-NEXT: v_alignbit_b32 v3, v5, v3, 16 +; GFX8-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3 offset:65532 ; GFX8-NEXT: s_waitcnt lgkmcnt(0) -; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3 +; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4 ; GFX8-NEXT: s_or_b64 s[6:7], vcc, s[6:7] -; GFX8-NEXT: v_mov_b32_e32 v3, v4 ; GFX8-NEXT: s_andn2_b64 exec, exec, s[6:7] ; GFX8-NEXT: s_cbranch_execnz .LBB27_1 ; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end diff --git a/llvm/test/CodeGen/AMDGPU/local-atomicrmw-fsub.ll b/llvm/test/CodeGen/AMDGPU/local-atomicrmw-fsub.ll index d74338caba1cd..929bb61ddabcf 100644 --- a/llvm/test/CodeGen/AMDGPU/local-atomicrmw-fsub.ll +++ b/llvm/test/CodeGen/AMDGPU/local-atomicrmw-fsub.ll @@ -453,13 +453,14 @@ define void @local_atomic_fsub_noret_f32(ptr addrspace(3) %ptr) nounwind { ; GFX12-NEXT: .LBB2_1: ; %atomicrmw.start ; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX12-NEXT: s_wait_dscnt 0x0 -; GFX12-NEXT: v_add_f32_e32 v2, -4.0, v1 +; GFX12-NEXT: v_mov_b32_e32 v2, v1 +; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX12-NEXT: v_add_f32_e32 v1, -4.0, v2 ; GFX12-NEXT: s_wait_storecnt 0x0 -; GFX12-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v1 +; GFX12-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 ; GFX12-NEXT: s_wait_dscnt 0x0 ; GFX12-NEXT: global_inv scope:SCOPE_SE -; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1 -; GFX12-NEXT: v_mov_b32_e32 v1, v2 +; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2 ; GFX12-NEXT: s_wait_alu 0xfffe ; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0 ; GFX12-NEXT: s_wait_alu 0xfffe @@ -478,12 +479,12 @@ define void @local_atomic_fsub_noret_f32(ptr addrspace(3) %ptr) nounwind { ; GFX942-NEXT: .LBB2_1: ; %atomicrmw.start ; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX942-NEXT: s_waitcnt lgkmcnt(0) -; GFX942-NEXT: v_add_f32_e32 v2, -4.0, v1 -; GFX942-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 +; GFX942-NEXT: v_mov_b32_e32 v2, v1 +; GFX942-NEXT: v_add_f32_e32 v1, -4.0, v2 +; GFX942-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 ; GFX942-NEXT: s_waitcnt lgkmcnt(0) -; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1 +; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2 ; GFX942-NEXT: s_or_b64 s[0:1], vcc, s[0:1] -; GFX942-NEXT: v_mov_b32_e32 v1, v2 ; GFX942-NEXT: s_andn2_b64 exec, exec, s[0:1] ; GFX942-NEXT: s_cbranch_execnz .LBB2_1 ; GFX942-NEXT: ; %bb.2: ; %atomicrmw.end @@ 
-498,13 +499,14 @@ define void @local_atomic_fsub_noret_f32(ptr addrspace(3) %ptr) nounwind { ; GFX11-NEXT: .LBB2_1: ; %atomicrmw.start ; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX11-NEXT: s_waitcnt lgkmcnt(0) -; GFX11-NEXT: v_add_f32_e32 v2, -4.0, v1 +; GFX11-NEXT: v_mov_b32_e32 v2, v1 +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-NEXT: v_add_f32_e32 v1, -4.0, v2 ; GFX11-NEXT: s_waitcnt_vscnt null, 0x0 -; GFX11-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v1 +; GFX11-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 ; GFX11-NEXT: s_waitcnt lgkmcnt(0) ; GFX11-NEXT: buffer_gl0_inv -; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1 -; GFX11-NEXT: v_mov_b32_e32 v1, v2 +; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2 ; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0 ; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0 @@ -521,13 +523,13 @@ define void @local_atomic_fsub_noret_f32(ptr addrspace(3) %ptr) nounwind { ; GFX10-NEXT: .LBB2_1: ; %atomicrmw.start ; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX10-NEXT: s_waitcnt lgkmcnt(0) -; GFX10-NEXT: v_add_f32_e32 v2, -4.0, v1 +; GFX10-NEXT: v_mov_b32_e32 v2, v1 +; GFX10-NEXT: v_add_f32_e32 v1, -4.0, v2 ; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 -; GFX10-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 +; GFX10-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 ; GFX10-NEXT: s_waitcnt lgkmcnt(0) ; GFX10-NEXT: buffer_gl0_inv -; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1 -; GFX10-NEXT: v_mov_b32_e32 v1, v2 +; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2 ; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4 ; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4 ; GFX10-NEXT: s_cbranch_execnz .LBB2_1 @@ -543,12 +545,12 @@ define void @local_atomic_fsub_noret_f32(ptr addrspace(3) %ptr) nounwind { ; GFX90A-NEXT: .LBB2_1: ; %atomicrmw.start ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX90A-NEXT: s_waitcnt lgkmcnt(0) -; GFX90A-NEXT: v_add_f32_e32 v2, -4.0, v1 -; GFX90A-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 +; GFX90A-NEXT: v_mov_b32_e32 v2, v1 +; GFX90A-NEXT: v_add_f32_e32 v1, -4.0, v2 +; GFX90A-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 ; GFX90A-NEXT: s_waitcnt lgkmcnt(0) -; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1 +; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2 ; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5] -; GFX90A-NEXT: v_mov_b32_e32 v1, v2 ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX90A-NEXT: s_cbranch_execnz .LBB2_1 ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end @@ -563,12 +565,12 @@ define void @local_atomic_fsub_noret_f32(ptr addrspace(3) %ptr) nounwind { ; GFX908-NEXT: .LBB2_1: ; %atomicrmw.start ; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX908-NEXT: s_waitcnt lgkmcnt(0) -; GFX908-NEXT: v_add_f32_e32 v2, -4.0, v1 -; GFX908-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 +; GFX908-NEXT: v_mov_b32_e32 v2, v1 +; GFX908-NEXT: v_add_f32_e32 v1, -4.0, v2 +; GFX908-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 ; GFX908-NEXT: s_waitcnt lgkmcnt(0) -; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1 +; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2 ; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5] -; GFX908-NEXT: v_mov_b32_e32 v1, v2 ; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX908-NEXT: s_cbranch_execnz .LBB2_1 ; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end @@ -584,12 +586,12 @@ define void @local_atomic_fsub_noret_f32(ptr addrspace(3) %ptr) nounwind { ; GFX8-NEXT: .LBB2_1: ; %atomicrmw.start ; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX8-NEXT: s_waitcnt lgkmcnt(0) -; GFX8-NEXT: v_add_f32_e32 v2, -4.0, v1 -; GFX8-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 +; GFX8-NEXT: v_mov_b32_e32 v2, 
v1
+; GFX8-NEXT: v_add_f32_e32 v1, -4.0, v2
+; GFX8-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1
 ; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
 ; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX8-NEXT: v_mov_b32_e32 v1, v2
 ; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
 ; GFX8-NEXT: s_cbranch_execnz .LBB2_1
 ; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -605,12 +607,12 @@ define void @local_atomic_fsub_noret_f32(ptr addrspace(3) %ptr) nounwind {
 ; GFX7-NEXT: .LBB2_1: ; %atomicrmw.start
 ; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_add_f32_e32 v2, -4.0, v1
-; GFX7-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2
+; GFX7-NEXT: v_mov_b32_e32 v2, v1
+; GFX7-NEXT: v_add_f32_e32 v1, -4.0, v2
+; GFX7-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1
 ; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
 ; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX7-NEXT: v_mov_b32_e32 v1, v2
 ; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5]
 ; GFX7-NEXT: s_cbranch_execnz .LBB2_1
 ; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -626,12 +628,12 @@ define void @local_atomic_fsub_noret_f32(ptr addrspace(3) %ptr) nounwind {
 ; GFX6-NEXT: .LBB2_1: ; %atomicrmw.start
 ; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_add_f32_e32 v2, -4.0, v1
-; GFX6-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2
+; GFX6-NEXT: v_mov_b32_e32 v2, v1
+; GFX6-NEXT: v_add_f32_e32 v1, -4.0, v2
+; GFX6-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1
 ; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
 ; GFX6-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX6-NEXT: v_mov_b32_e32 v1, v2
 ; GFX6-NEXT: s_andn2_b64 exec, exec, s[4:5]
 ; GFX6-NEXT: s_cbranch_execnz .LBB2_1
 ; GFX6-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -654,13 +656,14 @@ define void @local_atomic_fsub_noret_f32__offset(ptr addrspace(3) %ptr) nounwind
 ; GFX12-NEXT: .LBB3_1: ; %atomicrmw.start
 ; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX12-NEXT: s_wait_dscnt 0x0
-; GFX12-NEXT: v_add_f32_e32 v2, -4.0, v1
+; GFX12-NEXT: v_mov_b32_e32 v2, v1
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_add_f32_e32 v1, -4.0, v2
 ; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v1 offset:65532
+; GFX12-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:65532
 ; GFX12-NEXT: s_wait_dscnt 0x0
 ; GFX12-NEXT: global_inv scope:SCOPE_SE
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
-; GFX12-NEXT: v_mov_b32_e32 v1, v2
+; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
 ; GFX12-NEXT: s_wait_alu 0xfffe
 ; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
 ; GFX12-NEXT: s_wait_alu 0xfffe
@@ -679,12 +682,12 @@ define void @local_atomic_fsub_noret_f32__offset(ptr addrspace(3) %ptr) nounwind
 ; GFX942-NEXT: .LBB3_1: ; %atomicrmw.start
 ; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_add_f32_e32 v2, -4.0, v1
-; GFX942-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 offset:65532
+; GFX942-NEXT: v_mov_b32_e32 v2, v1
+; GFX942-NEXT: v_add_f32_e32 v1, -4.0, v2
+; GFX942-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 offset:65532
 ; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
 ; GFX942-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
-; GFX942-NEXT: v_mov_b32_e32 v1, v2
 ; GFX942-NEXT: s_andn2_b64 exec, exec, s[0:1]
 ; GFX942-NEXT: s_cbranch_execnz .LBB3_1
 ; GFX942-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -699,13 +702,14 @@ define void @local_atomic_fsub_noret_f32__offset(ptr addrspace(3) %ptr) nounwind
 ; GFX11-NEXT: .LBB3_1: ; %atomicrmw.start
 ; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: v_add_f32_e32 v2, -4.0, v1
+; GFX11-NEXT: v_mov_b32_e32 v2, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_add_f32_e32 v1, -4.0, v2
 ; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v1 offset:65532
+; GFX11-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:65532
 ; GFX11-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
-; GFX11-NEXT: v_mov_b32_e32 v1, v2
+; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
 ; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
 ; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
 ; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -722,13 +726,13 @@ define void @local_atomic_fsub_noret_f32__offset(ptr addrspace(3) %ptr) nounwind
 ; GFX10-NEXT: .LBB3_1: ; %atomicrmw.start
 ; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX10-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-NEXT: v_add_f32_e32 v2, -4.0, v1
+; GFX10-NEXT: v_mov_b32_e32 v2, v1
+; GFX10-NEXT: v_add_f32_e32 v1, -4.0, v2
 ; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 offset:65532
+; GFX10-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 offset:65532
 ; GFX10-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
-; GFX10-NEXT: v_mov_b32_e32 v1, v2
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
 ; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4
 ; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4
 ; GFX10-NEXT: s_cbranch_execnz .LBB3_1
@@ -744,12 +748,12 @@ define void @local_atomic_fsub_noret_f32__offset(ptr addrspace(3) %ptr) nounwind
 ; GFX90A-NEXT: .LBB3_1: ; %atomicrmw.start
 ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_add_f32_e32 v2, -4.0, v1
-; GFX90A-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 offset:65532
+; GFX90A-NEXT: v_mov_b32_e32 v2, v1
+; GFX90A-NEXT: v_add_f32_e32 v1, -4.0, v2
+; GFX90A-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 offset:65532
 ; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
 ; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX90A-NEXT: v_mov_b32_e32 v1, v2
 ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5]
 ; GFX90A-NEXT: s_cbranch_execnz .LBB3_1
 ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -764,12 +768,12 @@ define void @local_atomic_fsub_noret_f32__offset(ptr addrspace(3) %ptr) nounwind
 ; GFX908-NEXT: .LBB3_1: ; %atomicrmw.start
 ; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_add_f32_e32 v2, -4.0, v1
-; GFX908-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 offset:65532
+; GFX908-NEXT: v_mov_b32_e32 v2, v1
+; GFX908-NEXT: v_add_f32_e32 v1, -4.0, v2
+; GFX908-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 offset:65532
 ; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
 ; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX908-NEXT: v_mov_b32_e32 v1, v2
 ; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5]
 ; GFX908-NEXT: s_cbranch_execnz .LBB3_1
 ; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -785,12 +789,12 @@ define void @local_atomic_fsub_noret_f32__offset(ptr addrspace(3) %ptr) nounwind
 ; GFX8-NEXT: .LBB3_1: ; %atomicrmw.start
 ; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_add_f32_e32 v2, -4.0, v1
-; GFX8-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 offset:65532
+; GFX8-NEXT: v_mov_b32_e32 v2, v1
+; GFX8-NEXT: v_add_f32_e32 v1, -4.0, v2
+; GFX8-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 offset:65532
 ; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
 ; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX8-NEXT: v_mov_b32_e32 v1, v2
 ; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
 ; GFX8-NEXT: s_cbranch_execnz .LBB3_1
 ; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -806,12 +810,12 @@ define void @local_atomic_fsub_noret_f32__offset(ptr addrspace(3) %ptr) nounwind
 ; GFX7-NEXT: .LBB3_1: ; %atomicrmw.start
 ; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_add_f32_e32 v2, -4.0, v1
-; GFX7-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 offset:65532
+; GFX7-NEXT: v_mov_b32_e32 v2, v1
+; GFX7-NEXT: v_add_f32_e32 v1, -4.0, v2
+; GFX7-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 offset:65532
 ; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
 ; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX7-NEXT: v_mov_b32_e32 v1, v2
 ; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5]
 ; GFX7-NEXT: s_cbranch_execnz .LBB3_1
 ; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -828,12 +832,12 @@ define void @local_atomic_fsub_noret_f32__offset(ptr addrspace(3) %ptr) nounwind
 ; GFX6-NEXT: .LBB3_1: ; %atomicrmw.start
 ; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_add_f32_e32 v2, -4.0, v1
-; GFX6-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2
+; GFX6-NEXT: v_mov_b32_e32 v2, v1
+; GFX6-NEXT: v_add_f32_e32 v1, -4.0, v2
+; GFX6-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1
 ; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
 ; GFX6-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX6-NEXT: v_mov_b32_e32 v1, v2
 ; GFX6-NEXT: s_andn2_b64 exec, exec, s[4:5]
 ; GFX6-NEXT: s_cbranch_execnz .LBB3_1
 ; GFX6-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -1296,13 +1300,14 @@ define void @local_atomic_fsub_noret_f64(ptr addrspace(3) %ptr) nounwind {
 ; GFX12-NEXT: .LBB6_1: ; %atomicrmw.start
 ; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX12-NEXT: s_wait_dscnt 0x0
-; GFX12-NEXT: v_add_f64_e32 v[3:4], -4.0, v[1:2]
+; GFX12-NEXT: v_dual_mov_b32 v4, v2 :: v_dual_mov_b32 v3, v1
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_add_f64_e32 v[1:2], -4.0, v[3:4]
 ; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: ds_cmpstore_rtn_b64 v[3:4], v0, v[3:4], v[1:2]
+; GFX12-NEXT: ds_cmpstore_rtn_b64 v[1:2], v0, v[1:2], v[3:4]
 ; GFX12-NEXT: s_wait_dscnt 0x0
 ; GFX12-NEXT: global_inv scope:SCOPE_SE
-; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[3:4], v[1:2]
-; GFX12-NEXT: v_dual_mov_b32 v1, v3 :: v_dual_mov_b32 v2, v4
+; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[1:2], v[3:4]
 ; GFX12-NEXT: s_wait_alu 0xfffe
 ; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
 ; GFX12-NEXT: s_wait_alu 0xfffe
@@ -1321,12 +1326,12 @@ define void @local_atomic_fsub_noret_f64(ptr addrspace(3) %ptr) nounwind {
 ; GFX942-NEXT: .LBB6_1: ; %atomicrmw.start
 ; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_add_f64 v[4:5], v[2:3], -4.0
-; GFX942-NEXT: ds_cmpst_rtn_b64 v[4:5], v0, v[2:3], v[4:5]
+; GFX942-NEXT: v_mov_b64_e32 v[4:5], v[2:3]
+; GFX942-NEXT: v_add_f64 v[2:3], v[4:5], -4.0
+; GFX942-NEXT: ds_cmpst_rtn_b64 v[2:3], v0, v[4:5], v[2:3]
 ; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
+; GFX942-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
 ; GFX942-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
-; GFX942-NEXT: v_mov_b64_e32 v[2:3], v[4:5]
 ; GFX942-NEXT: s_andn2_b64 exec, exec, s[0:1]
 ; GFX942-NEXT: s_cbranch_execnz .LBB6_1
 ; GFX942-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -1341,13 +1346,14 @@ define void @local_atomic_fsub_noret_f64(ptr addrspace(3) %ptr) nounwind {
 ; GFX11-NEXT: .LBB6_1: ; %atomicrmw.start
 ; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: v_add_f64 v[3:4], v[1:2], -4.0
+; GFX11-NEXT: v_dual_mov_b32 v4, v2 :: v_dual_mov_b32 v3, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_add_f64 v[1:2], v[3:4], -4.0
 ; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: ds_cmpstore_rtn_b64 v[3:4], v0, v[3:4], v[1:2]
+; GFX11-NEXT: ds_cmpstore_rtn_b64 v[1:2], v0, v[1:2], v[3:4]
 ; GFX11-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[3:4], v[1:2]
-; GFX11-NEXT: v_dual_mov_b32 v1, v3 :: v_dual_mov_b32 v2, v4
+; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[1:2], v[3:4]
 ; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
 ; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
 ; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -1364,14 +1370,14 @@ define void @local_atomic_fsub_noret_f64(ptr addrspace(3) %ptr) nounwind {
 ; GFX10-NEXT: .LBB6_1: ; %atomicrmw.start
 ; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX10-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-NEXT: v_add_f64 v[3:4], v[1:2], -4.0
+; GFX10-NEXT: v_mov_b32_e32 v4, v2
+; GFX10-NEXT: v_mov_b32_e32 v3, v1
+; GFX10-NEXT: v_add_f64 v[1:2], v[3:4], -4.0
 ; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: ds_cmpst_rtn_b64 v[3:4], v0, v[1:2], v[3:4]
+; GFX10-NEXT: ds_cmpst_rtn_b64 v[1:2], v0, v[3:4], v[1:2]
 ; GFX10-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[3:4], v[1:2]
-; GFX10-NEXT: v_mov_b32_e32 v1, v3
-; GFX10-NEXT: v_mov_b32_e32 v2, v4
+; GFX10-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[1:2], v[3:4]
 ; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4
 ; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4
 ; GFX10-NEXT: s_cbranch_execnz .LBB6_1
@@ -1387,12 +1393,12 @@ define void @local_atomic_fsub_noret_f64(ptr addrspace(3) %ptr) nounwind {
 ; GFX90A-NEXT: .LBB6_1: ; %atomicrmw.start
 ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_add_f64 v[4:5], v[2:3], -4.0
-; GFX90A-NEXT: ds_cmpst_rtn_b64 v[4:5], v0, v[2:3], v[4:5]
+; GFX90A-NEXT: v_pk_mov_b32 v[4:5], v[2:3], v[2:3] op_sel:[0,1]
+; GFX90A-NEXT: v_add_f64 v[2:3], v[4:5], -4.0
+; GFX90A-NEXT: ds_cmpst_rtn_b64 v[2:3], v0, v[4:5], v[2:3]
 ; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
+; GFX90A-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
 ; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX90A-NEXT: v_pk_mov_b32 v[2:3], v[4:5], v[4:5] op_sel:[0,1]
 ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5]
 ; GFX90A-NEXT: s_cbranch_execnz .LBB6_1
 ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -1407,13 +1413,13 @@ define void @local_atomic_fsub_noret_f64(ptr addrspace(3) %ptr) nounwind {
 ; GFX908-NEXT: .LBB6_1: ; %atomicrmw.start
 ; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_add_f64 v[3:4], v[1:2], -4.0
-; GFX908-NEXT: ds_cmpst_rtn_b64 v[3:4], v0, v[1:2], v[3:4]
+; GFX908-NEXT: v_mov_b32_e32 v4, v2
+; GFX908-NEXT: v_mov_b32_e32 v3, v1
+; GFX908-NEXT: v_add_f64 v[1:2], v[3:4], -4.0
+; GFX908-NEXT: ds_cmpst_rtn_b64 v[1:2], v0, v[3:4], v[1:2]
 ; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[1:2]
-; GFX908-NEXT: v_mov_b32_e32 v1, v3
+; GFX908-NEXT: v_cmp_eq_u64_e32 vcc, v[1:2], v[3:4]
 ; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX908-NEXT: v_mov_b32_e32 v2, v4
 ; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5]
 ; GFX908-NEXT: s_cbranch_execnz .LBB6_1
 ; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -1429,13 +1435,13 @@ define void @local_atomic_fsub_noret_f64(ptr addrspace(3) %ptr) nounwind {
 ; GFX8-NEXT: .LBB6_1: ; %atomicrmw.start
 ; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_add_f64 v[3:4], v[1:2], -4.0
-; GFX8-NEXT: ds_cmpst_rtn_b64 v[3:4], v0, v[1:2], v[3:4]
+; GFX8-NEXT: v_mov_b32_e32 v4, v2
+; GFX8-NEXT: v_mov_b32_e32 v3, v1
+; GFX8-NEXT: v_add_f64 v[1:2], v[3:4], -4.0
+; GFX8-NEXT: ds_cmpst_rtn_b64 v[1:2], v0, v[3:4], v[1:2]
 ; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[1:2]
-; GFX8-NEXT: v_mov_b32_e32 v1, v3
+; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[1:2], v[3:4]
 ; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX8-NEXT: v_mov_b32_e32 v2, v4
 ; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
 ; GFX8-NEXT: s_cbranch_execnz .LBB6_1
 ; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -1451,13 +1457,13 @@ define void @local_atomic_fsub_noret_f64(ptr addrspace(3) %ptr) nounwind {
 ; GFX7-NEXT: .LBB6_1: ; %atomicrmw.start
 ; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_add_f64 v[3:4], v[1:2], -4.0
-; GFX7-NEXT: ds_cmpst_rtn_b64 v[3:4], v0, v[1:2], v[3:4]
+; GFX7-NEXT: v_mov_b32_e32 v4, v2
+; GFX7-NEXT: v_mov_b32_e32 v3, v1
+; GFX7-NEXT: v_add_f64 v[1:2], v[3:4], -4.0
+; GFX7-NEXT: ds_cmpst_rtn_b64 v[1:2], v0, v[3:4], v[1:2]
 ; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[1:2]
-; GFX7-NEXT: v_mov_b32_e32 v1, v3
+; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[1:2], v[3:4]
 ; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX7-NEXT: v_mov_b32_e32 v2, v4
 ; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5]
 ; GFX7-NEXT: s_cbranch_execnz .LBB6_1
 ; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -1473,13 +1479,13 @@ define void @local_atomic_fsub_noret_f64(ptr addrspace(3) %ptr) nounwind {
 ; GFX6-NEXT: .LBB6_1: ; %atomicrmw.start
 ; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_add_f64 v[3:4], v[1:2], -4.0
-; GFX6-NEXT: ds_cmpst_rtn_b64 v[3:4], v0, v[1:2], v[3:4]
+; GFX6-NEXT: v_mov_b32_e32 v4, v2
+; GFX6-NEXT: v_mov_b32_e32 v3, v1
+; GFX6-NEXT: v_add_f64 v[1:2], v[3:4], -4.0
+; GFX6-NEXT: ds_cmpst_rtn_b64 v[1:2], v0, v[3:4], v[1:2]
 ; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[1:2]
-; GFX6-NEXT: v_mov_b32_e32 v1, v3
+; GFX6-NEXT: v_cmp_eq_u64_e32 vcc, v[1:2], v[3:4]
 ; GFX6-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX6-NEXT: v_mov_b32_e32 v2, v4
 ; GFX6-NEXT: s_andn2_b64 exec, exec, s[4:5]
 ; GFX6-NEXT: s_cbranch_execnz .LBB6_1
 ; GFX6-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -1502,13 +1508,14 @@ define void @local_atomic_fsub_noret_f64__offset(ptr addrspace(3) %ptr) nounwind
 ; GFX12-NEXT: .LBB7_1: ; %atomicrmw.start
 ; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX12-NEXT: s_wait_dscnt 0x0
-; GFX12-NEXT: v_add_f64_e32 v[3:4], -4.0, v[1:2]
+; GFX12-NEXT: v_dual_mov_b32 v4, v2 :: v_dual_mov_b32 v3, v1
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_add_f64_e32 v[1:2], -4.0, v[3:4]
 ; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: ds_cmpstore_rtn_b64 v[3:4], v0, v[3:4], v[1:2] offset:65528
+; GFX12-NEXT: ds_cmpstore_rtn_b64 v[1:2], v0, v[1:2], v[3:4] offset:65528
 ; GFX12-NEXT: s_wait_dscnt 0x0
 ; GFX12-NEXT: global_inv scope:SCOPE_SE
-; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[3:4], v[1:2]
-; GFX12-NEXT: v_dual_mov_b32 v1, v3 :: v_dual_mov_b32 v2, v4
+; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[1:2], v[3:4]
 ; GFX12-NEXT: s_wait_alu 0xfffe
 ; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
 ; GFX12-NEXT: s_wait_alu 0xfffe
@@ -1527,12 +1534,12 @@ define void @local_atomic_fsub_noret_f64__offset(ptr addrspace(3) %ptr) nounwind
 ; GFX942-NEXT: .LBB7_1: ; %atomicrmw.start
 ; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_add_f64 v[4:5], v[2:3], -4.0
-; GFX942-NEXT: ds_cmpst_rtn_b64 v[4:5], v0, v[2:3], v[4:5] offset:65528
+; GFX942-NEXT: v_mov_b64_e32 v[4:5], v[2:3]
+; GFX942-NEXT: v_add_f64 v[2:3], v[4:5], -4.0
+; GFX942-NEXT: ds_cmpst_rtn_b64 v[2:3], v0, v[4:5], v[2:3] offset:65528
 ; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
+; GFX942-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
 ; GFX942-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
-; GFX942-NEXT: v_mov_b64_e32 v[2:3], v[4:5]
 ; GFX942-NEXT: s_andn2_b64 exec, exec, s[0:1]
 ; GFX942-NEXT: s_cbranch_execnz .LBB7_1
 ; GFX942-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -1547,13 +1554,14 @@ define void @local_atomic_fsub_noret_f64__offset(ptr addrspace(3) %ptr) nounwind
 ; GFX11-NEXT: .LBB7_1: ; %atomicrmw.start
 ; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: v_add_f64 v[3:4], v[1:2], -4.0
+; GFX11-NEXT: v_dual_mov_b32 v4, v2 :: v_dual_mov_b32 v3, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_add_f64 v[1:2], v[3:4], -4.0
 ; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: ds_cmpstore_rtn_b64 v[3:4], v0, v[3:4], v[1:2] offset:65528
+; GFX11-NEXT: ds_cmpstore_rtn_b64 v[1:2], v0, v[1:2], v[3:4] offset:65528
 ; GFX11-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[3:4], v[1:2]
-; GFX11-NEXT: v_dual_mov_b32 v1, v3 :: v_dual_mov_b32 v2, v4
+; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[1:2], v[3:4]
 ; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
 ; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
 ; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -1570,14 +1578,14 @@ define void @local_atomic_fsub_noret_f64__offset(ptr addrspace(3) %ptr) nounwind
 ; GFX10-NEXT: .LBB7_1: ; %atomicrmw.start
 ; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX10-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-NEXT: v_add_f64 v[3:4], v[1:2], -4.0
+; GFX10-NEXT: v_mov_b32_e32 v4, v2
+; GFX10-NEXT: v_mov_b32_e32 v3, v1
+; GFX10-NEXT: v_add_f64 v[1:2], v[3:4], -4.0
 ; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: ds_cmpst_rtn_b64 v[3:4], v0, v[1:2], v[3:4] offset:65528
+; GFX10-NEXT: ds_cmpst_rtn_b64 v[1:2], v0, v[3:4], v[1:2] offset:65528
 ; GFX10-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[3:4], v[1:2]
-; GFX10-NEXT: v_mov_b32_e32 v1, v3
-; GFX10-NEXT: v_mov_b32_e32 v2, v4
+; GFX10-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[1:2], v[3:4]
 ; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4
 ; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4
 ; GFX10-NEXT: s_cbranch_execnz .LBB7_1
@@ -1593,12 +1601,12 @@ define void @local_atomic_fsub_noret_f64__offset(ptr addrspace(3) %ptr) nounwind
 ; GFX90A-NEXT: .LBB7_1: ; %atomicrmw.start
 ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_add_f64 v[4:5], v[2:3], -4.0
-; GFX90A-NEXT: ds_cmpst_rtn_b64 v[4:5], v0, v[2:3], v[4:5] offset:65528
+; GFX90A-NEXT: v_pk_mov_b32 v[4:5], v[2:3], v[2:3] op_sel:[0,1]
+; GFX90A-NEXT: v_add_f64 v[2:3], v[4:5], -4.0
+; GFX90A-NEXT: ds_cmpst_rtn_b64 v[2:3], v0, v[4:5], v[2:3] offset:65528
 ; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
+; GFX90A-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
 ; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX90A-NEXT: v_pk_mov_b32 v[2:3], v[4:5], v[4:5] op_sel:[0,1]
 ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5]
 ; GFX90A-NEXT: s_cbranch_execnz .LBB7_1
 ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -1613,13 +1621,13 @@ define void @local_atomic_fsub_noret_f64__offset(ptr addrspace(3) %ptr) nounwind
 ; GFX908-NEXT: .LBB7_1: ; %atomicrmw.start
 ; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_add_f64 v[3:4], v[1:2], -4.0
-; GFX908-NEXT: ds_cmpst_rtn_b64 v[3:4], v0, v[1:2], v[3:4] offset:65528
+; GFX908-NEXT: v_mov_b32_e32 v4, v2
+; GFX908-NEXT: v_mov_b32_e32 v3, v1
+; GFX908-NEXT: v_add_f64 v[1:2], v[3:4], -4.0
+; GFX908-NEXT: ds_cmpst_rtn_b64 v[1:2], v0, v[3:4], v[1:2] offset:65528
 ; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[1:2]
-; GFX908-NEXT: v_mov_b32_e32 v1, v3
+; GFX908-NEXT: v_cmp_eq_u64_e32 vcc, v[1:2], v[3:4]
 ; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX908-NEXT: v_mov_b32_e32 v2, v4
 ; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5]
 ; GFX908-NEXT: s_cbranch_execnz .LBB7_1
 ; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -1635,13 +1643,13 @@ define void @local_atomic_fsub_noret_f64__offset(ptr addrspace(3) %ptr) nounwind
 ; GFX8-NEXT: .LBB7_1: ; %atomicrmw.start
 ; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_add_f64 v[3:4], v[1:2], -4.0
-; GFX8-NEXT: ds_cmpst_rtn_b64 v[3:4], v0, v[1:2], v[3:4] offset:65528
+; GFX8-NEXT: v_mov_b32_e32 v4, v2
+; GFX8-NEXT: v_mov_b32_e32 v3, v1
+; GFX8-NEXT: v_add_f64 v[1:2], v[3:4], -4.0
+; GFX8-NEXT: ds_cmpst_rtn_b64 v[1:2], v0, v[3:4], v[1:2] offset:65528
 ; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[1:2]
-; GFX8-NEXT: v_mov_b32_e32 v1, v3
+; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[1:2], v[3:4]
 ; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX8-NEXT: v_mov_b32_e32 v2, v4
 ; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
 ; GFX8-NEXT: s_cbranch_execnz .LBB7_1
 ; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -1657,13 +1665,13 @@ define void @local_atomic_fsub_noret_f64__offset(ptr addrspace(3) %ptr) nounwind
 ; GFX7-NEXT: .LBB7_1: ; %atomicrmw.start
 ; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_add_f64 v[3:4], v[1:2], -4.0
-; GFX7-NEXT: ds_cmpst_rtn_b64 v[3:4], v0, v[1:2], v[3:4] offset:65528
+; GFX7-NEXT: v_mov_b32_e32 v4, v2
+; GFX7-NEXT: v_mov_b32_e32 v3, v1
+; GFX7-NEXT: v_add_f64 v[1:2], v[3:4], -4.0
+; GFX7-NEXT: ds_cmpst_rtn_b64 v[1:2], v0, v[3:4], v[1:2] offset:65528
 ; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[1:2]
-; GFX7-NEXT: v_mov_b32_e32 v1, v3
+; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[1:2], v[3:4]
 ; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX7-NEXT: v_mov_b32_e32 v2, v4
 ; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5]
 ; GFX7-NEXT: s_cbranch_execnz .LBB7_1
 ; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -1680,13 +1688,13 @@ define void @local_atomic_fsub_noret_f64__offset(ptr addrspace(3) %ptr) nounwind
 ; GFX6-NEXT: .LBB7_1: ; %atomicrmw.start
 ; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_add_f64 v[3:4], v[0:1], -4.0
-; GFX6-NEXT: ds_cmpst_rtn_b64 v[3:4], v2, v[0:1], v[3:4]
+; GFX6-NEXT: v_mov_b32_e32 v4, v1
+; GFX6-NEXT: v_mov_b32_e32 v3, v0
+; GFX6-NEXT: v_add_f64 v[0:1], v[3:4], -4.0
+; GFX6-NEXT: ds_cmpst_rtn_b64 v[0:1], v2, v[3:4], v[0:1]
 ; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[0:1]
-; GFX6-NEXT: v_mov_b32_e32 v0, v3
+; GFX6-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[3:4]
 ; GFX6-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX6-NEXT: v_mov_b32_e32 v1, v4
 ; GFX6-NEXT: s_andn2_b64 exec, exec, s[4:5]
 ; GFX6-NEXT: s_cbranch_execnz .LBB7_1
 ; GFX6-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -2494,27 +2502,27 @@ define void @local_atomic_fsub_noret_f16(ptr addrspace(3) %ptr) nounwind {
 ; GFX12-TRUE16-NEXT: v_and_b32_e32 v1, -4, v0
 ; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v0, 3, v0
 ; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
-; GFX12-TRUE16-NEXT: ds_load_b32 v2, v1
-; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff
+; GFX12-TRUE16-NEXT: ds_load_b32 v3, v1
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v2, v0, 0xffff
 ; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, 24, v0
 ; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX12-TRUE16-NEXT: v_not_b32_e32 v3, v3
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v2, v2
 ; GFX12-TRUE16-NEXT: .LBB10_1: ; %atomicrmw.start
 ; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
-; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v4, v0, v2
-; GFX12-TRUE16-NEXT: v_mov_b16_e32 v4.h, 0
-; GFX12-TRUE16-NEXT: v_add_f16_e32 v4.l, -4.0, v4.l
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v3.h, 0
+; GFX12-TRUE16-NEXT: v_add_f16_e32 v3.l, -4.0, v3.l
 ; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v4, v0, v4
-; GFX12-TRUE16-NEXT: v_and_or_b32 v4, v2, v3, v4
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v0, v3
+; GFX12-TRUE16-NEXT: v_and_or_b32 v3, v4, v2, v3
 ; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
-; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v1, v4, v2
+; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v1, v3, v4
 ; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
 ; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SE
-; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2
-; GFX12-TRUE16-NEXT: v_mov_b32_e32 v2, v4
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
 ; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
 ; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
 ; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
@@ -2535,28 +2543,28 @@ define void @local_atomic_fsub_noret_f16(ptr addrspace(3) %ptr) nounwind {
 ; GFX12-FAKE16-NEXT: v_and_b32_e32 v1, -4, v0
 ; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v0, 3, v0
 ; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
-; GFX12-FAKE16-NEXT: ds_load_b32 v2, v1
-; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff
+; GFX12-FAKE16-NEXT: ds_load_b32 v3, v1
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v2, v0, 0xffff
 ; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, 24, v0
 ; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX12-FAKE16-NEXT: v_not_b32_e32 v3, v3
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v2, v2
 ; GFX12-FAKE16-NEXT: .LBB10_1: ; %atomicrmw.start
 ; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
-; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v4, v0, v2
-; GFX12-FAKE16-NEXT: v_add_f16_e32 v4, -4.0, v4
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v4, v3
 ; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_and_b32_e32 v4, 0xffff, v4
-; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, v0, v4
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX12-FAKE16-NEXT: v_add_f16_e32 v3, -4.0, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v0, v3
 ; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_and_or_b32 v4, v2, v3, v4
+; GFX12-FAKE16-NEXT: v_and_or_b32 v3, v4, v2, v3
 ; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
-; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v1, v4, v2
+; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v3, v1, v3, v4
 ; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
 ; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SE
-; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2
-; GFX12-FAKE16-NEXT: v_mov_b32_e32 v2, v4
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
 ; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
 ; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
 ; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
@@ -2581,15 +2589,15 @@ define void @local_atomic_fsub_noret_f16(ptr addrspace(3) %ptr) nounwind {
 ; GFX942-NEXT: .LBB10_1: ; %atomicrmw.start
 ; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_lshrrev_b32_e32 v4, v0, v3
-; GFX942-NEXT: v_add_f16_e32 v4, -4.0, v4
-; GFX942-NEXT: v_lshlrev_b32_e32 v4, v0, v4
-; GFX942-NEXT: v_and_or_b32 v4, v3, v2, v4
-; GFX942-NEXT: ds_cmpst_rtn_b32 v4, v1, v3, v4
+; GFX942-NEXT: v_mov_b32_e32 v4, v3
+; GFX942-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX942-NEXT: v_add_f16_e32 v3, -4.0, v3
+; GFX942-NEXT: v_lshlrev_b32_e32 v3, v0, v3
+; GFX942-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX942-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3
 ; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
 ; GFX942-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
-; GFX942-NEXT: v_mov_b32_e32 v3, v4
 ; GFX942-NEXT: s_andn2_b64 exec, exec, s[0:1]
 ; GFX942-NEXT: s_cbranch_execnz .LBB10_1
 ; GFX942-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -2602,27 +2610,27 @@ define void @local_atomic_fsub_noret_f16(ptr addrspace(3) %ptr) nounwind {
 ; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, -4, v0
 ; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v0, 3, v0
 ; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
-; GFX11-TRUE16-NEXT: ds_load_b32 v2, v1
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff
+; GFX11-TRUE16-NEXT: ds_load_b32 v3, v1
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v2, v0, 0xffff
 ; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 24, v0
 ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_not_b32_e32 v3, v3
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v2, v2
 ; GFX11-TRUE16-NEXT: .LBB10_1: ; %atomicrmw.start
 ; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v4, v0, v2
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, 0
-; GFX11-TRUE16-NEXT: v_add_f16_e32 v4.l, -4.0, v4.l
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, 0
+; GFX11-TRUE16-NEXT: v_add_f16_e32 v3.l, -4.0, v3.l
 ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v4, v0, v4
-; GFX11-TRUE16-NEXT: v_and_or_b32 v4, v2, v3, v4
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v0, v3
+; GFX11-TRUE16-NEXT: v_and_or_b32 v3, v4, v2, v3
 ; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v1, v4, v2
+; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v1, v3, v4
 ; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX11-TRUE16-NEXT: buffer_gl0_inv
-; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v2, v4
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
 ; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
 ; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
 ; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -2637,28 +2645,28 @@ define void @local_atomic_fsub_noret_f16(ptr addrspace(3) %ptr) nounwind {
 ; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, -4, v0
 ; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v0, 3, v0
 ; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
-; GFX11-FAKE16-NEXT: ds_load_b32 v2, v1
-; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff
+; GFX11-FAKE16-NEXT: ds_load_b32 v3, v1
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v2, v0, 0xffff
 ; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 24, v0
 ; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX11-FAKE16-NEXT: v_not_b32_e32 v3, v3
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v2, v2
 ; GFX11-FAKE16-NEXT: .LBB10_1: ; %atomicrmw.start
 ; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, v0, v2
-; GFX11-FAKE16-NEXT: v_add_f16_e32 v4, -4.0, v4
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX11-FAKE16-NEXT: v_add_f16_e32 v3, -4.0, v3
 ; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 0xffff, v4
-; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, v0, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v0, v3
 ; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_and_or_b32 v4, v2, v3, v4
+; GFX11-FAKE16-NEXT: v_and_or_b32 v3, v4, v2, v3
 ; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v1, v4, v2
+; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v3, v1, v3, v4
 ; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX11-FAKE16-NEXT: buffer_gl0_inv
-; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2
-; GFX11-FAKE16-NEXT: v_mov_b32_e32 v2, v4
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
 ; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
 ; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
 ; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -2673,23 +2681,23 @@ define void @local_atomic_fsub_noret_f16(ptr addrspace(3) %ptr) nounwind {
 ; GFX10-NEXT: v_and_b32_e32 v1, -4, v0
 ; GFX10-NEXT: v_lshlrev_b32_e32 v0, 3, v0
 ; GFX10-NEXT: s_mov_b32 s4, 0
-; GFX10-NEXT: ds_read_b32 v2, v1
-; GFX10-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff
+; GFX10-NEXT: ds_read_b32 v3, v1
+; GFX10-NEXT: v_lshlrev_b32_e64 v2, v0, 0xffff
 ; GFX10-NEXT: v_and_b32_e32 v0, 24, v0
-; GFX10-NEXT: v_not_b32_e32 v3, v3
+; GFX10-NEXT: v_not_b32_e32 v2, v2
 ; GFX10-NEXT: .LBB10_1: ; %atomicrmw.start
 ; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX10-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-NEXT: v_lshrrev_b32_e32 v4, v0, v2
-; GFX10-NEXT: v_add_f16_e32 v4, -4.0, v4
-; GFX10-NEXT: v_lshlrev_b32_sdwa v4, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
-; GFX10-NEXT: v_and_or_b32 v4, v2, v3, v4
+; GFX10-NEXT: v_mov_b32_e32 v4, v3
+; GFX10-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX10-NEXT: v_add_f16_e32 v3, -4.0, v3
+; GFX10-NEXT: v_lshlrev_b32_sdwa v3, v0, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; GFX10-NEXT: v_and_or_b32 v3, v4, v2, v3
 ; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: ds_cmpst_rtn_b32 v4, v1, v2, v4
+; GFX10-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3
 ; GFX10-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2
-; GFX10-NEXT: v_mov_b32_e32 v2, v4
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
 ; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4
 ; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4
 ; GFX10-NEXT: s_cbranch_execnz .LBB10_1
@@ -2711,15 +2719,15 @@ define void @local_atomic_fsub_noret_f16(ptr addrspace(3) %ptr) nounwind {
 ; GFX90A-NEXT: .LBB10_1: ; %atomicrmw.start
 ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_lshrrev_b32_e32 v4, v0, v3
-; GFX90A-NEXT: v_add_f16_e32 v4, -4.0, v4
-; GFX90A-NEXT: v_lshlrev_b32_e32 v4, v0, v4
-; GFX90A-NEXT: v_and_or_b32 v4, v3, v2, v4
-; GFX90A-NEXT: ds_cmpst_rtn_b32 v4, v1, v3, v4
+; GFX90A-NEXT: v_mov_b32_e32 v4, v3
+; GFX90A-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX90A-NEXT: v_add_f16_e32 v3, -4.0, v3
+; GFX90A-NEXT: v_lshlrev_b32_e32 v3, v0, v3
+; GFX90A-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX90A-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3
 ; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
 ; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX90A-NEXT: v_mov_b32_e32 v3, v4
 ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5]
 ; GFX90A-NEXT: s_cbranch_execnz .LBB10_1
 ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -2740,15 +2748,15 @@ define void @local_atomic_fsub_noret_f16(ptr addrspace(3) %ptr) nounwind {
 ; GFX908-NEXT: .LBB10_1: ; %atomicrmw.start
 ; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_lshrrev_b32_e32 v4, v0, v3
-; GFX908-NEXT: v_add_f16_e32 v4, -4.0, v4
-; GFX908-NEXT: v_lshlrev_b32_e32 v4, v0, v4
-; GFX908-NEXT: v_and_or_b32 v4, v3, v2, v4
-; GFX908-NEXT: ds_cmpst_rtn_b32 v4, v1, v3, v4
+; GFX908-NEXT: v_mov_b32_e32 v4, v3
+; GFX908-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX908-NEXT: v_add_f16_e32 v3, -4.0, v3
+; GFX908-NEXT: v_lshlrev_b32_e32 v3, v0, v3
+; GFX908-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX908-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3
 ; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
 ; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX908-NEXT: v_mov_b32_e32 v3, v4
 ; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5]
 ; GFX908-NEXT: s_cbranch_execnz .LBB10_1
 ; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -2770,16 +2778,16 @@ define void @local_atomic_fsub_noret_f16(ptr addrspace(3) %ptr) nounwind {
 ; GFX8-NEXT: .LBB10_1: ; %atomicrmw.start
 ; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_lshrrev_b32_e32 v4, v0, v3
-; GFX8-NEXT: v_add_f16_e32 v4, -4.0, v4
-; GFX8-NEXT: v_and_b32_e32 v5, v3, v2
-; GFX8-NEXT: v_lshlrev_b32_e32 v4, v0, v4
-; GFX8-NEXT: v_or_b32_e32 v4, v5, v4
-; GFX8-NEXT: ds_cmpst_rtn_b32 v4, v1, v3, v4
+; GFX8-NEXT: v_mov_b32_e32 v4, v3
+; GFX8-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX8-NEXT: v_add_f16_e32 v3, -4.0, v3
+; GFX8-NEXT: v_and_b32_e32 v5, v4, v2
+; GFX8-NEXT: v_lshlrev_b32_e32 v3, v0, v3
+; GFX8-NEXT: v_or_b32_e32 v3, v5, v3
+; GFX8-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3
 ; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
 ; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX8-NEXT: v_mov_b32_e32 v3, v4
 ; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
 ; GFX8-NEXT: s_cbranch_execnz .LBB10_1
 ; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -2800,18 +2808,18 @@ define void @local_atomic_fsub_noret_f16(ptr addrspace(3) %ptr) nounwind {
 ; GFX7-NEXT: .LBB10_1: ; %atomicrmw.start
 ; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_lshrrev_b32_e32 v4, v0, v3
-; GFX7-NEXT: v_cvt_f32_f16_e32 v4, v4
-; GFX7-NEXT: v_and_b32_e32 v5, v3, v2
-; GFX7-NEXT: v_add_f32_e32 v4, -4.0, v4
-; GFX7-NEXT: v_cvt_f16_f32_e32 v4, v4
-; GFX7-NEXT: v_lshlrev_b32_e32 v4, v0, v4
-; GFX7-NEXT: v_or_b32_e32 v4, v5, v4
-; GFX7-NEXT: ds_cmpst_rtn_b32 v4, v1, v3, v4
+; GFX7-NEXT: v_mov_b32_e32 v4, v3
+; GFX7-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX7-NEXT: v_cvt_f32_f16_e32 v3, v3
+; GFX7-NEXT: v_and_b32_e32 v5, v4, v2
+; GFX7-NEXT: v_add_f32_e32 v3, -4.0, v3
+; GFX7-NEXT: v_cvt_f16_f32_e32 v3, v3
+; GFX7-NEXT: v_lshlrev_b32_e32 v3, v0, v3
+; GFX7-NEXT: v_or_b32_e32 v3, v5, v3
+; GFX7-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3
 ; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
 ; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX7-NEXT: v_mov_b32_e32 v3, v4
 ; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5]
 ; GFX7-NEXT: s_cbranch_execnz .LBB10_1
 ; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -2832,18 +2840,18 @@ define void @local_atomic_fsub_noret_f16(ptr addrspace(3) %ptr) nounwind {
 ; GFX6-NEXT: .LBB10_1: ; %atomicrmw.start
 ; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_lshrrev_b32_e32 v4, v0, v3
-; GFX6-NEXT: v_cvt_f32_f16_e32 v4, v4
-; GFX6-NEXT: v_and_b32_e32 v5, v3, v2
-; GFX6-NEXT: v_add_f32_e32 v4, -4.0, v4
-; GFX6-NEXT: v_cvt_f16_f32_e32 v4, v4
-; GFX6-NEXT: v_lshlrev_b32_e32 v4, v0, v4
-; GFX6-NEXT: v_or_b32_e32 v4, v5, v4
-; GFX6-NEXT: ds_cmpst_rtn_b32 v4, v1, v3, v4
+; GFX6-NEXT: v_mov_b32_e32 v4, v3
+; GFX6-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX6-NEXT: v_cvt_f32_f16_e32 v3, v3
+; GFX6-NEXT: v_and_b32_e32 v5, v4, v2
+; GFX6-NEXT: v_add_f32_e32 v3, -4.0, v3
+; GFX6-NEXT: v_cvt_f16_f32_e32 v3, v3
+; GFX6-NEXT: v_lshlrev_b32_e32 v3, v0, v3
+; GFX6-NEXT: v_or_b32_e32 v3, v5, v3
+; GFX6-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3
 ; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
 ; GFX6-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX6-NEXT: v_mov_b32_e32 v3, v4
 ; GFX6-NEXT: s_andn2_b64 exec, exec, s[4:5]
 ; GFX6-NEXT: s_cbranch_execnz .LBB10_1
 ; GFX6-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -2874,19 +2882,19 @@ define void @local_atomic_fsub_noret_f16__offset(ptr addrspace(3) %ptr) nounwind
 ; GFX12-TRUE16-NEXT: .LBB11_1: ; %atomicrmw.start
 ; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
-; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v4, v1, v3
-; GFX12-TRUE16-NEXT: v_mov_b16_e32 v4.h, 0
-; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-TRUE16-NEXT: v_add_f16_e32 v4.l, -4.0, v4.l
-; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v4, v1, v4
-; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-TRUE16-NEXT: v_and_or_b32 v4, v3, v2, v4
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v3.h, 0
+; GFX12-TRUE16-NEXT: v_add_f16_e32 v3.l, -4.0, v3.l
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v1, v3
+; GFX12-TRUE16-NEXT: v_and_or_b32 v3, v4, v2, v3
 ; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
-; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3
+; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4
 ; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
 ; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SE
-; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX12-TRUE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
 ; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
 ; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
 ; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
@@ -2917,19 +2925,20 @@ define void @local_atomic_fsub_noret_f16__offset(ptr addrspace(3) %ptr) nounwind
 ; GFX12-FAKE16-NEXT: .LBB11_1: ; %atomicrmw.start
 ; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
-; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v4, v1, v3
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v4, v3
 ; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_add_f16_e32 v4, -4.0, v4
-; GFX12-FAKE16-NEXT: v_and_b32_e32 v4, 0xffff, v4
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX12-FAKE16-NEXT: v_add_f16_e32 v3, -4.0, v3
 ; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, v1, v4
-; GFX12-FAKE16-NEXT: v_and_or_b32 v4, v3, v2, v4
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v1, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_or_b32 v3, v4, v2, v3
 ; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
-; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3
+; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4
 ; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
 ; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SE
-; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX12-FAKE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
 ; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
 ; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
 ; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
@@ -2955,15 +2964,15 @@ define void @local_atomic_fsub_noret_f16__offset(ptr addrspace(3) %ptr) nounwind
 ; GFX942-NEXT: .LBB11_1: ; %atomicrmw.start
 ; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_lshrrev_b32_e32 v4, v1, v3
-; GFX942-NEXT: v_add_f16_e32 v4, -4.0, v4
-; GFX942-NEXT: v_lshlrev_b32_e32 v4, v1, v4
-; GFX942-NEXT: v_and_or_b32 v4, v3, v2, v4
-; GFX942-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX942-NEXT: v_mov_b32_e32 v4, v3
+; GFX942-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX942-NEXT: v_add_f16_e32 v3, -4.0, v3
+; GFX942-NEXT: v_lshlrev_b32_e32 v3, v1, v3
+; GFX942-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX942-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
 ; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
 ; GFX942-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
-; GFX942-NEXT: v_mov_b32_e32 v3, v4
 ; GFX942-NEXT: s_andn2_b64 exec, exec, s[0:1]
 ; GFX942-NEXT: s_cbranch_execnz .LBB11_1
 ; GFX942-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -2986,19 +2995,19 @@ define void @local_atomic_fsub_noret_f16__offset(ptr addrspace(3) %ptr) nounwind
 ; GFX11-TRUE16-NEXT: .LBB11_1: ; %atomicrmw.start
 ; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v4, v1, v3
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, 0
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_add_f16_e32 v4.l, -4.0, v4.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v4, v1, v4
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_and_or_b32 v4, v3, v2, v4
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, 0
+; GFX11-TRUE16-NEXT: v_add_f16_e32 v3.l, -4.0, v3.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v1, v3
+; GFX11-TRUE16-NEXT: v_and_or_b32 v3, v4, v2, v3
 ; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3
+; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4
 ; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX11-TRUE16-NEXT: buffer_gl0_inv
-; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
 ; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
 ; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
 ; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -3023,19 +3032,20 @@ define void @local_atomic_fsub_noret_f16__offset(ptr addrspace(3) %ptr) nounwind
 ; GFX11-FAKE16-NEXT: .LBB11_1: ; %atomicrmw.start
 ; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, v1, v3
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v3
 ; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_add_f16_e32 v4, -4.0, v4
-; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 0xffff, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX11-FAKE16-NEXT: v_add_f16_e32 v3, -4.0, v3
 ; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, v1, v4
-; GFX11-FAKE16-NEXT: v_and_or_b32 v4, v3, v2, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v1, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_or_b32 v3, v4, v2, v3
 ; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3
+; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4
 ; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX11-FAKE16-NEXT: buffer_gl0_inv
-; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
 ; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
 ; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
 ; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -3058,16 +3068,16 @@ define void @local_atomic_fsub_noret_f16__offset(ptr addrspace(3) %ptr) nounwind
 ; GFX10-NEXT: .LBB11_1: ; %atomicrmw.start
 ; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX10-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-NEXT: v_lshrrev_b32_e32 v4, v1, v3
-; GFX10-NEXT: v_add_f16_e32 v4, -4.0, v4
-; GFX10-NEXT: v_lshlrev_b32_sdwa v4, v1, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
-; GFX10-NEXT: v_and_or_b32 v4, v3, v2, v4
+; GFX10-NEXT: v_mov_b32_e32 v4, v3
+; GFX10-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX10-NEXT: v_add_f16_e32 v3, -4.0, v3
+; GFX10-NEXT: v_lshlrev_b32_sdwa v3, v1, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; GFX10-NEXT: v_and_or_b32 v3, v4, v2, v3
 ; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX10-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
 ; GFX10-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX10-NEXT: v_mov_b32_e32 v3, v4
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
 ; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4
 ; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4
 ; GFX10-NEXT: s_cbranch_execnz .LBB11_1
@@ -3090,15 +3100,15 @@ define void @local_atomic_fsub_noret_f16__offset(ptr addrspace(3) %ptr) nounwind
 ; GFX90A-NEXT: .LBB11_1: ; %atomicrmw.start
 ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_lshrrev_b32_e32 v4, v1, v3
-; GFX90A-NEXT: v_add_f16_e32 v4, -4.0, v4
-; GFX90A-NEXT: v_lshlrev_b32_e32 v4, v1, v4
-; GFX90A-NEXT: v_and_or_b32 v4, v3, v2, v4
-; GFX90A-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX90A-NEXT: v_mov_b32_e32 v4, v3
+; GFX90A-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX90A-NEXT: v_add_f16_e32 v3, -4.0, v3
+; GFX90A-NEXT: v_lshlrev_b32_e32 v3, v1, v3
+; GFX90A-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX90A-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
 ; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
 ; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX90A-NEXT: v_mov_b32_e32 v3, v4
 ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5]
 ; GFX90A-NEXT: s_cbranch_execnz .LBB11_1
 ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -3120,15 +3130,15 @@ define void @local_atomic_fsub_noret_f16__offset(ptr addrspace(3) %ptr) nounwind
 ; GFX908-NEXT: .LBB11_1: ; %atomicrmw.start
 ; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_lshrrev_b32_e32 v4, v1, v3
-; GFX908-NEXT: v_add_f16_e32 v4, -4.0, v4
-; GFX908-NEXT: v_lshlrev_b32_e32 v4, v1, v4
-; GFX908-NEXT: v_and_or_b32 v4, v3, v2, v4
-; GFX908-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
-; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX908-NEXT: v_mov_b32_e32 v4, v3
+; GFX908-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX908-NEXT: v_add_f16_e32 v3, -4.0, v3
+; GFX908-NEXT: v_lshlrev_b32_e32 v3, v1, v3
+; GFX908-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX908-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
+; GFX908-NEXT: s_waitcnt lgkmcnt(0)
+; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
 ; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX908-NEXT: v_mov_b32_e32 v3, v4
 ; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5]
 ; GFX908-NEXT: s_cbranch_execnz .LBB11_1
 ; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -3151,16 +3161,16 @@ define void @local_atomic_fsub_noret_f16__offset(ptr addrspace(3) %ptr) nounwind
 ; GFX8-NEXT: .LBB11_1: ; %atomicrmw.start
 ; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_lshrrev_b32_e32 v4, v1, v3
-; GFX8-NEXT: v_add_f16_e32 v4, -4.0, v4
-; GFX8-NEXT: v_and_b32_e32 v5, v3, v2
-; GFX8-NEXT: v_lshlrev_b32_e32 v4, v1, v4
-; GFX8-NEXT: v_or_b32_e32 v4, v5, v4
-; GFX8-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX8-NEXT: v_mov_b32_e32 v4, v3
+; GFX8-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX8-NEXT: v_add_f16_e32 v3, -4.0, v3
+; GFX8-NEXT: v_and_b32_e32 v5, v4, v2
+; GFX8-NEXT: v_lshlrev_b32_e32 v3, v1, v3
+; GFX8-NEXT: v_or_b32_e32 v3, v5, v3
+; GFX8-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
 ; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
 ; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX8-NEXT: v_mov_b32_e32 v3, v4
 ; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
 ; GFX8-NEXT: s_cbranch_execnz .LBB11_1
 ; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -3182,18 +3192,18 @@ define void @local_atomic_fsub_noret_f16__offset(ptr addrspace(3) %ptr) nounwind
 ; GFX7-NEXT: .LBB11_1: ; %atomicrmw.start
 ; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_lshrrev_b32_e32 v4, v1, v3
-; GFX7-NEXT: v_cvt_f32_f16_e32 v4, v4
-; GFX7-NEXT: v_and_b32_e32 v5, v3, v2
-; GFX7-NEXT: v_add_f32_e32 v4, -4.0, v4
-; GFX7-NEXT: v_cvt_f16_f32_e32 v4, v4
-; GFX7-NEXT: v_lshlrev_b32_e32 v4, v1, v4
-; GFX7-NEXT: v_or_b32_e32 v4, v5, v4
-; GFX7-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX7-NEXT: v_mov_b32_e32 v4, v3
+; GFX7-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX7-NEXT: v_cvt_f32_f16_e32 v3, v3
+; GFX7-NEXT: v_and_b32_e32 v5, v4, v2
+; GFX7-NEXT: v_add_f32_e32 v3, -4.0, v3
+; GFX7-NEXT: v_cvt_f16_f32_e32 v3, v3
+; GFX7-NEXT: v_lshlrev_b32_e32 v3, v1, v3
+; GFX7-NEXT: v_or_b32_e32 v3, v5, v3
+; GFX7-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
 ; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
 ; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX7-NEXT: v_mov_b32_e32 v3, v4
 ; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5]
 ; GFX7-NEXT: s_cbranch_execnz .LBB11_1
 ; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -3215,18 +3225,18 @@ define void @local_atomic_fsub_noret_f16__offset(ptr addrspace(3) %ptr) nounwind
 ; GFX6-NEXT: .LBB11_1: ; %atomicrmw.start
 ; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_lshrrev_b32_e32 v4, v1, v3
-; GFX6-NEXT: v_cvt_f32_f16_e32 v4, v4
-; GFX6-NEXT: v_and_b32_e32 v5, v3, v2
-; GFX6-NEXT: v_add_f32_e32 v4, -4.0, v4
-; GFX6-NEXT: v_cvt_f16_f32_e32 v4, v4
-; GFX6-NEXT: v_lshlrev_b32_e32 v4, v1, v4
-; GFX6-NEXT: v_or_b32_e32 v4, v5, v4
-; GFX6-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX6-NEXT: v_mov_b32_e32 v4, v3
+; GFX6-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX6-NEXT: v_cvt_f32_f16_e32 v3, v3
+; GFX6-NEXT: v_and_b32_e32 v5, v4, v2
+; GFX6-NEXT: v_add_f32_e32 v3, -4.0, v3
+; GFX6-NEXT: v_cvt_f16_f32_e32 v3, v3
+; GFX6-NEXT: v_lshlrev_b32_e32 v3, v1, v3
+; GFX6-NEXT: v_or_b32_e32 v3, v5, v3
+; GFX6-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
 ; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
 ; GFX6-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX6-NEXT: v_mov_b32_e32 v3, v4
 ; GFX6-NEXT: s_andn2_b64 exec, exec, s[4:5]
 ; GFX6-NEXT: s_cbranch_execnz .LBB11_1
 ; GFX6-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -3548,16 +3558,16 @@ define void @local_atomic_fsub_noret_f16__offset__align4(ptr addrspace(3) %ptr)
 ; GFX12-TRUE16-NEXT: .LBB13_1: ; %atomicrmw.start
 ; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
-; GFX12-TRUE16-NEXT: v_add_f16_e32 v2.l, -4.0, v1.l
-; GFX12-TRUE16-NEXT: v_mov_b16_e32 v2.h, 0
-; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-TRUE16-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v2
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v2, v1
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v1.h, 0
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_add_f16_e32 v1.l, -4.0, v2.l
+; GFX12-TRUE16-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v1
 ; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
-; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v1 offset:65534
+; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:65534
 ; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
 ; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SE
-; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
-; GFX12-TRUE16-NEXT: v_mov_b32_e32 v1, v2
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
 ; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
 ; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
 ; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
@@ -3580,16 +3590,17 @@ define void @local_atomic_fsub_noret_f16__offset__align4(ptr addrspace(3) %ptr)
 ; GFX12-FAKE16-NEXT: .LBB13_1: ; %atomicrmw.start
 ; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
-; GFX12-FAKE16-NEXT: v_add_f16_e32 v2, -4.0, v1
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v2, v1
 ; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX12-FAKE16-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v2
+; GFX12-FAKE16-NEXT: v_add_f16_e32 v1, -4.0, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v1
 ; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
-; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v1 offset:65534
+; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:65534
 ; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
 ; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SE
-; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
-; GFX12-FAKE16-NEXT: v_mov_b32_e32 v1, v2
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
 ; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
 ; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
 ; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
@@ -3609,13 +3620,13 @@ define void @local_atomic_fsub_noret_f16__offset__align4(ptr addrspace(3) %ptr)
 ; GFX942-NEXT: .LBB13_1: ; %atomicrmw.start
 ; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_add_f16_e32 v2, -4.0, v1
-; GFX942-NEXT: v_and_or_b32 v2, v1, s2, v2
-; GFX942-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 offset:65534
+; GFX942-NEXT: v_mov_b32_e32 v2, v1
+; GFX942-NEXT: v_add_f16_e32 v1, -4.0, v2
+; GFX942-NEXT: v_and_or_b32 v1, v2, s2, v1
+; GFX942-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 offset:65534
 ; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
 ; GFX942-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
-; GFX942-NEXT: v_mov_b32_e32 v1, v2
 ; GFX942-NEXT: s_andn2_b64 exec, exec, s[0:1]
 ; GFX942-NEXT: s_cbranch_execnz .LBB13_1
 ; GFX942-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -3630,16 +3641,16 @@ define void @local_atomic_fsub_noret_f16__offset__align4(ptr addrspace(3) %ptr)
 ; GFX11-TRUE16-NEXT: .LBB13_1: ; %atomicrmw.start
 ; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_add_f16_e32 v2.l, -4.0, v1.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, 0
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v2
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v2, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, 0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_f16_e32 v1.l, -4.0, v2.l
+; GFX11-TRUE16-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v1
 ; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v1 offset:65534
+; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:65534
 ; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX11-TRUE16-NEXT: buffer_gl0_inv
-; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v1, v2
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
 ; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
 ; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
 ; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -3656,16 +3667,17 @@ define void @local_atomic_fsub_noret_f16__offset__align4(ptr addrspace(3) %ptr)
 ; GFX11-FAKE16-NEXT: .LBB13_1: ; %atomicrmw.start
 ; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-FAKE16-NEXT: v_add_f16_e32 v2, -4.0, v1
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v2, v1
 ; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-FAKE16-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v2
+; GFX11-FAKE16-NEXT: v_add_f16_e32 v1, -4.0, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v1
 ; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v1 offset:65534
+; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:65534
 ; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX11-FAKE16-NEXT: buffer_gl0_inv
-; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
-; GFX11-FAKE16-NEXT: v_mov_b32_e32 v1, v2
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
 ; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
 ; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
 ; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -3682,15 +3694,15 @@ define void @local_atomic_fsub_noret_f16__offset__align4(ptr addrspace(3) %ptr)
 ; GFX10-NEXT: .LBB13_1: ; %atomicrmw.start
 ; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX10-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-NEXT: v_add_f16_e32 v2, -4.0, v1
-; GFX10-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX10-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v2
+; GFX10-NEXT: v_mov_b32_e32 v2, v1
+; GFX10-NEXT: v_add_f16_e32 v1, -4.0, v2
+; GFX10-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX10-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v1
 ; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 offset:65534
+; GFX10-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 offset:65534
 ; GFX10-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
-; GFX10-NEXT: v_mov_b32_e32 v1, v2
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
 ; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4
 ; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4
 ; GFX10-NEXT: s_cbranch_execnz .LBB13_1
@@ -3707,13 +3719,13 @@ define void @local_atomic_fsub_noret_f16__offset__align4(ptr addrspace(3) %ptr)
 ; GFX90A-NEXT: .LBB13_1: ; %atomicrmw.start
 ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_add_f16_e32 v2, -4.0, v1
-; GFX90A-NEXT: v_and_or_b32 v2, v1, s6, v2
-; GFX90A-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 offset:65534
+; GFX90A-NEXT: v_mov_b32_e32 v2, v1
+; GFX90A-NEXT: v_add_f16_e32 v1, -4.0, v2
+; GFX90A-NEXT: v_and_or_b32 v1, v2, s6, v1
+; GFX90A-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 offset:65534
 ; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
 ; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX90A-NEXT: v_mov_b32_e32 v1, v2
 ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5]
 ; GFX90A-NEXT: s_cbranch_execnz .LBB13_1
 ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -3729,13 +3741,13 @@ define void @local_atomic_fsub_noret_f16__offset__align4(ptr addrspace(3) %ptr)
 ; GFX908-NEXT: .LBB13_1: ; %atomicrmw.start
 ; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_add_f16_e32 v2, -4.0, v1
-; GFX908-NEXT: v_and_or_b32 v2, v1, s6, v2
-; GFX908-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 offset:65534
+; GFX908-NEXT: v_mov_b32_e32 v2, v1
+; GFX908-NEXT: v_add_f16_e32 v1, -4.0, v2
+; GFX908-NEXT: v_and_or_b32 v1, v2, s6, v1
+; GFX908-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 offset:65534
 ; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
 ; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX908-NEXT: v_mov_b32_e32 v1, v2
 ; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5]
 ; GFX908-NEXT: s_cbranch_execnz .LBB13_1
 ; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -3751,14 +3763,14 @@ define void @local_atomic_fsub_noret_f16__offset__align4(ptr addrspace(3) %ptr)
 ; GFX8-NEXT: .LBB13_1: ; %atomicrmw.start
 ; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_add_f16_e32 v2, -4.0, v1
-; GFX8-NEXT: v_and_b32_e32 v3, 0xffff0000, v1
-; GFX8-NEXT: v_or_b32_e32 v2, v3, v2
-; GFX8-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 offset:65534
+; GFX8-NEXT: v_mov_b32_e32 v2, v1
+; GFX8-NEXT: v_add_f16_e32 v1, -4.0, v2
+; GFX8-NEXT: v_and_b32_e32 v3, 0xffff0000, v2
+; GFX8-NEXT: v_or_b32_e32 v1, v3, v1
+; GFX8-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 offset:65534
 ; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
 ; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX8-NEXT: v_mov_b32_e32 v1, v2
 ; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
 ; GFX8-NEXT: s_cbranch_execnz .LBB13_1
 ; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -3774,16 +3786,16 @@ define void @local_atomic_fsub_noret_f16__offset__align4(ptr addrspace(3) %ptr)
 ; GFX7-NEXT: .LBB13_1: ; %atomicrmw.start
 ; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_cvt_f32_f16_e32 v2, v1
-; GFX7-NEXT: v_and_b32_e32 v3, 0xffff0000, v1
-; GFX7-NEXT: v_add_f32_e32 v2, -4.0, v2
-; GFX7-NEXT: v_cvt_f16_f32_e32 v2, v2
-; GFX7-NEXT: v_or_b32_e32 v2, v3, v2
-; GFX7-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 offset:65534
+; GFX7-NEXT: v_mov_b32_e32 v2, v1
+; GFX7-NEXT: v_cvt_f32_f16_e32 v1, v2
+; GFX7-NEXT: v_and_b32_e32 v3, 0xffff0000, v2
+; GFX7-NEXT: v_add_f32_e32 v1, -4.0, v1
+; GFX7-NEXT: v_cvt_f16_f32_e32 v1, v1
+; GFX7-NEXT: v_or_b32_e32 v1, v3, v1 +; GFX7-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 offset:65534 ; GFX7-NEXT: s_waitcnt lgkmcnt(0) -; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1 +; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2 ; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5] -; GFX7-NEXT: v_mov_b32_e32 v1, v2 ; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX7-NEXT: s_cbranch_execnz .LBB13_1 ; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end @@ -3800,16 +3812,16 @@ define void @local_atomic_fsub_noret_f16__offset__align4(ptr addrspace(3) %ptr) ; GFX6-NEXT: .LBB13_1: ; %atomicrmw.start ; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX6-NEXT: s_waitcnt lgkmcnt(0) -; GFX6-NEXT: v_cvt_f32_f16_e32 v2, v1 -; GFX6-NEXT: v_and_b32_e32 v3, 0xffff0000, v1 -; GFX6-NEXT: v_add_f32_e32 v2, -4.0, v2 -; GFX6-NEXT: v_cvt_f16_f32_e32 v2, v2 -; GFX6-NEXT: v_or_b32_e32 v2, v3, v2 -; GFX6-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 +; GFX6-NEXT: v_mov_b32_e32 v2, v1 +; GFX6-NEXT: v_cvt_f32_f16_e32 v1, v2 +; GFX6-NEXT: v_and_b32_e32 v3, 0xffff0000, v2 +; GFX6-NEXT: v_add_f32_e32 v1, -4.0, v1 +; GFX6-NEXT: v_cvt_f16_f32_e32 v1, v1 +; GFX6-NEXT: v_or_b32_e32 v1, v3, v1 +; GFX6-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 ; GFX6-NEXT: s_waitcnt lgkmcnt(0) -; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1 +; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2 ; GFX6-NEXT: s_or_b64 s[4:5], vcc, s[4:5] -; GFX6-NEXT: v_mov_b32_e32 v1, v2 ; GFX6-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX6-NEXT: s_cbranch_execnz .LBB13_1 ; GFX6-NEXT: ; %bb.2: ; %atomicrmw.end @@ -4759,38 +4771,38 @@ define void @local_atomic_fsub_noret_bf16(ptr addrspace(3) %ptr) nounwind { ; GFX12-TRUE16-NEXT: v_and_b32_e32 v1, -4, v0 ; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v0, 3, v0 ; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0 -; GFX12-TRUE16-NEXT: ds_load_b32 v2, v1 -; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff +; GFX12-TRUE16-NEXT: ds_load_b32 v3, v1 +; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v2, v0, 0xffff ; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, 24, v0 ; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) -; GFX12-TRUE16-NEXT: v_not_b32_e32 v3, v3 +; GFX12-TRUE16-NEXT: v_not_b32_e32 v2, v2 ; GFX12-TRUE16-NEXT: .LBB16_1: ; %atomicrmw.start ; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0 -; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v4, v0, v2 -; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 16, v4 +; GFX12-TRUE16-NEXT: v_mov_b32_e32 v4, v3 +; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v0, v4 +; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3 ; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX12-TRUE16-NEXT: v_add_f32_e32 v4, -4.0, v4 -; GFX12-TRUE16-NEXT: v_bfe_u32 v5, v4, 16, 1 -; GFX12-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v4 -; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4 +; GFX12-TRUE16-NEXT: v_add_f32_e32 v3, -4.0, v3 +; GFX12-TRUE16-NEXT: v_bfe_u32 v5, v3, 16, 1 +; GFX12-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v3 +; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3 ; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1) -; GFX12-TRUE16-NEXT: v_add3_u32 v5, v5, v4, 0x7fff +; GFX12-TRUE16-NEXT: v_add3_u32 v5, v5, v3, 0x7fff ; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd -; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo +; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo ; GFX12-TRUE16-NEXT: 
v_mov_b16_e32 v5.h, 0 ; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.l, v4.h -; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v4, v0, v5 +; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.l, v3.h +; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v0, v5 ; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX12-TRUE16-NEXT: v_and_or_b32 v4, v2, v3, v4 +; GFX12-TRUE16-NEXT: v_and_or_b32 v3, v4, v2, v3 ; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0 -; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v1, v4, v2 +; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v1, v3, v4 ; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0 ; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SE -; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2 -; GFX12-TRUE16-NEXT: v_mov_b32_e32 v2, v4 +; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4 ; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe ; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0 ; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe @@ -4811,37 +4823,37 @@ define void @local_atomic_fsub_noret_bf16(ptr addrspace(3) %ptr) nounwind { ; GFX12-FAKE16-NEXT: v_and_b32_e32 v1, -4, v0 ; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v0, 3, v0 ; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0 -; GFX12-FAKE16-NEXT: ds_load_b32 v2, v1 -; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff +; GFX12-FAKE16-NEXT: ds_load_b32 v3, v1 +; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v2, v0, 0xffff ; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, 24, v0 ; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) -; GFX12-FAKE16-NEXT: v_not_b32_e32 v3, v3 +; GFX12-FAKE16-NEXT: v_not_b32_e32 v2, v2 ; GFX12-FAKE16-NEXT: .LBB16_1: ; %atomicrmw.start ; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0 -; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v4, v0, v2 -; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v4 +; GFX12-FAKE16-NEXT: v_mov_b32_e32 v4, v3 ; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX12-FAKE16-NEXT: v_add_f32_e32 v4, -4.0, v4 -; GFX12-FAKE16-NEXT: v_bfe_u32 v5, v4, 16, 1 -; GFX12-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v4 -; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4 +; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v0, v4 +; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX12-FAKE16-NEXT: v_add_f32_e32 v3, -4.0, v3 +; GFX12-FAKE16-NEXT: v_bfe_u32 v5, v3, 16, 1 +; GFX12-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v3 +; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3 ; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1) -; GFX12-FAKE16-NEXT: v_add3_u32 v5, v5, v4, 0x7fff +; GFX12-FAKE16-NEXT: v_add3_u32 v5, v5, v3, 0x7fff ; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd -; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo +; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo ; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v4 -; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, v0, v4 +; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3 +; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v0, v3 ; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX12-FAKE16-NEXT: v_and_or_b32 v4, v2, v3, v4 +; GFX12-FAKE16-NEXT: v_and_or_b32 v3, v4, v2, v3 ; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0 -; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v1, v4, 
v2 +; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v3, v1, v3, v4 ; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0 ; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SE -; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2 -; GFX12-FAKE16-NEXT: v_mov_b32_e32 v2, v4 +; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4 ; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe ; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0 ; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe @@ -4867,22 +4879,22 @@ define void @local_atomic_fsub_noret_bf16(ptr addrspace(3) %ptr) nounwind { ; GFX942-NEXT: .LBB16_1: ; %atomicrmw.start ; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX942-NEXT: s_waitcnt lgkmcnt(0) -; GFX942-NEXT: v_lshrrev_b32_sdwa v4, v0, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD +; GFX942-NEXT: v_mov_b32_e32 v4, v3 +; GFX942-NEXT: v_lshrrev_b32_sdwa v3, v0, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; GFX942-NEXT: s_nop 0 -; GFX942-NEXT: v_add_f32_e32 v4, -4.0, v4 -; GFX942-NEXT: v_bfe_u32 v5, v4, 16, 1 -; GFX942-NEXT: v_or_b32_e32 v6, 0x400000, v4 -; GFX942-NEXT: v_add3_u32 v5, v5, v4, s2 -; GFX942-NEXT: v_cmp_u_f32_e32 vcc, v4, v4 +; GFX942-NEXT: v_add_f32_e32 v3, -4.0, v3 +; GFX942-NEXT: v_bfe_u32 v5, v3, 16, 1 +; GFX942-NEXT: v_or_b32_e32 v6, 0x400000, v3 +; GFX942-NEXT: v_add3_u32 v5, v5, v3, s2 +; GFX942-NEXT: v_cmp_u_f32_e32 vcc, v3, v3 ; GFX942-NEXT: s_nop 1 -; GFX942-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc -; GFX942-NEXT: v_lshlrev_b32_sdwa v4, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1 -; GFX942-NEXT: v_and_or_b32 v4, v3, v2, v4 -; GFX942-NEXT: ds_cmpst_rtn_b32 v4, v1, v3, v4 +; GFX942-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc +; GFX942-NEXT: v_lshlrev_b32_sdwa v3, v0, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1 +; GFX942-NEXT: v_and_or_b32 v3, v4, v2, v3 +; GFX942-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3 ; GFX942-NEXT: s_waitcnt lgkmcnt(0) -; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3 +; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4 ; GFX942-NEXT: s_or_b64 s[0:1], vcc, s[0:1] -; GFX942-NEXT: v_mov_b32_e32 v3, v4 ; GFX942-NEXT: s_andn2_b64 exec, exec, s[0:1] ; GFX942-NEXT: s_cbranch_execnz .LBB16_1 ; GFX942-NEXT: ; %bb.2: ; %atomicrmw.end @@ -4895,38 +4907,38 @@ define void @local_atomic_fsub_noret_bf16(ptr addrspace(3) %ptr) nounwind { ; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, -4, v0 ; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v0, 3, v0 ; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0 -; GFX11-TRUE16-NEXT: ds_load_b32 v2, v1 -; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff +; GFX11-TRUE16-NEXT: ds_load_b32 v3, v1 +; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v2, v0, 0xffff ; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 24, v0 ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) -; GFX11-TRUE16-NEXT: v_not_b32_e32 v3, v3 +; GFX11-TRUE16-NEXT: v_not_b32_e32 v2, v2 ; GFX11-TRUE16-NEXT: .p2align 6 ; GFX11-TRUE16-NEXT: .LBB16_1: ; %atomicrmw.start ; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0) -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v4, v0, v2 -; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 16, v4 +; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v3 +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v0, v4 +; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3 ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; 
GFX11-TRUE16-NEXT: v_add_f32_e32 v4, -4.0, v4 -; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v4, 16, 1 -; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v4 -; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4 +; GFX11-TRUE16-NEXT: v_add_f32_e32 v3, -4.0, v3 +; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v3, 16, 1 +; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v3 +; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3 ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-TRUE16-NEXT: v_add3_u32 v5, v5, v4, 0x7fff -; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo +; GFX11-TRUE16-NEXT: v_add3_u32 v5, v5, v3, 0x7fff +; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, 0 ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v4.h -; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v4, v0, v5 +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v3.h +; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v0, v5 ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX11-TRUE16-NEXT: v_and_or_b32 v4, v2, v3, v4 +; GFX11-TRUE16-NEXT: v_and_or_b32 v3, v4, v2, v3 ; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0 -; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v1, v4, v2 +; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v1, v3, v4 ; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0) ; GFX11-TRUE16-NEXT: buffer_gl0_inv -; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2 -; GFX11-TRUE16-NEXT: v_mov_b32_e32 v2, v4 +; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4 ; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0 ; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0 @@ -4941,37 +4953,37 @@ define void @local_atomic_fsub_noret_bf16(ptr addrspace(3) %ptr) nounwind { ; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, -4, v0 ; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v0, 3, v0 ; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0 -; GFX11-FAKE16-NEXT: ds_load_b32 v2, v1 -; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff +; GFX11-FAKE16-NEXT: ds_load_b32 v3, v1 +; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v2, v0, 0xffff ; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 24, v0 ; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) -; GFX11-FAKE16-NEXT: v_not_b32_e32 v3, v3 +; GFX11-FAKE16-NEXT: v_not_b32_e32 v2, v2 ; GFX11-FAKE16-NEXT: .p2align 6 ; GFX11-FAKE16-NEXT: .LBB16_1: ; %atomicrmw.start ; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0) -; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, v0, v2 -; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v4 +; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v3 ; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-FAKE16-NEXT: v_add_f32_e32 v4, -4.0, v4 -; GFX11-FAKE16-NEXT: v_bfe_u32 v5, v4, 16, 1 -; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v4 -; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4 +; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v0, v4 +; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX11-FAKE16-NEXT: v_add_f32_e32 v3, -4.0, v3 +; GFX11-FAKE16-NEXT: v_bfe_u32 v5, v3, 16, 1 +; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v3 +; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3 ; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1) -; 
GFX11-FAKE16-NEXT: v_add3_u32 v5, v5, v4, 0x7fff -; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo +; GFX11-FAKE16-NEXT: v_add3_u32 v5, v5, v3, 0x7fff +; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo ; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v4 -; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, v0, v4 +; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3 +; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v0, v3 ; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX11-FAKE16-NEXT: v_and_or_b32 v4, v2, v3, v4 +; GFX11-FAKE16-NEXT: v_and_or_b32 v3, v4, v2, v3 ; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0 -; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v1, v4, v2 +; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v3, v1, v3, v4 ; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0) ; GFX11-FAKE16-NEXT: buffer_gl0_inv -; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2 -; GFX11-FAKE16-NEXT: v_mov_b32_e32 v2, v4 +; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4 ; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0 ; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0 @@ -4986,28 +4998,28 @@ define void @local_atomic_fsub_noret_bf16(ptr addrspace(3) %ptr) nounwind { ; GFX10-NEXT: v_and_b32_e32 v1, -4, v0 ; GFX10-NEXT: v_lshlrev_b32_e32 v0, 3, v0 ; GFX10-NEXT: s_mov_b32 s4, 0 -; GFX10-NEXT: ds_read_b32 v2, v1 -; GFX10-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff +; GFX10-NEXT: ds_read_b32 v3, v1 +; GFX10-NEXT: v_lshlrev_b32_e64 v2, v0, 0xffff ; GFX10-NEXT: v_and_b32_e32 v0, 24, v0 -; GFX10-NEXT: v_not_b32_e32 v3, v3 +; GFX10-NEXT: v_not_b32_e32 v2, v2 ; GFX10-NEXT: .LBB16_1: ; %atomicrmw.start ; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX10-NEXT: s_waitcnt lgkmcnt(0) -; GFX10-NEXT: v_lshrrev_b32_sdwa v4, v0, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD -; GFX10-NEXT: v_add_f32_e32 v4, -4.0, v4 -; GFX10-NEXT: v_bfe_u32 v5, v4, 16, 1 -; GFX10-NEXT: v_or_b32_e32 v6, 0x400000, v4 -; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4 -; GFX10-NEXT: v_add3_u32 v5, v5, v4, 0x7fff -; GFX10-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo -; GFX10-NEXT: v_lshlrev_b32_sdwa v4, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1 -; GFX10-NEXT: v_and_or_b32 v4, v2, v3, v4 +; GFX10-NEXT: v_mov_b32_e32 v4, v3 +; GFX10-NEXT: v_lshrrev_b32_sdwa v3, v0, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD +; GFX10-NEXT: v_add_f32_e32 v3, -4.0, v3 +; GFX10-NEXT: v_bfe_u32 v5, v3, 16, 1 +; GFX10-NEXT: v_or_b32_e32 v6, 0x400000, v3 +; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3 +; GFX10-NEXT: v_add3_u32 v5, v5, v3, 0x7fff +; GFX10-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo +; GFX10-NEXT: v_lshlrev_b32_sdwa v3, v0, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1 +; GFX10-NEXT: v_and_or_b32 v3, v4, v2, v3 ; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 -; GFX10-NEXT: ds_cmpst_rtn_b32 v4, v1, v2, v4 +; GFX10-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3 ; GFX10-NEXT: s_waitcnt lgkmcnt(0) ; GFX10-NEXT: buffer_gl0_inv -; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2 -; GFX10-NEXT: v_mov_b32_e32 v2, v4 +; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4 ; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4 ; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4 ; GFX10-NEXT: s_cbranch_execnz .LBB16_1 @@ -5030,20 +5042,20 @@ define void @local_atomic_fsub_noret_bf16(ptr addrspace(3) %ptr) nounwind { ; GFX90A-NEXT: .LBB16_1: ; %atomicrmw.start ; GFX90A-NEXT: ; 
=>This Inner Loop Header: Depth=1 ; GFX90A-NEXT: s_waitcnt lgkmcnt(0) -; GFX90A-NEXT: v_lshrrev_b32_sdwa v4, v0, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD -; GFX90A-NEXT: v_add_f32_e32 v4, -4.0, v4 -; GFX90A-NEXT: v_bfe_u32 v5, v4, 16, 1 -; GFX90A-NEXT: v_or_b32_e32 v6, 0x400000, v4 -; GFX90A-NEXT: v_add3_u32 v5, v5, v4, s6 -; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v4, v4 -; GFX90A-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc -; GFX90A-NEXT: v_lshlrev_b32_sdwa v4, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1 -; GFX90A-NEXT: v_and_or_b32 v4, v3, v2, v4 -; GFX90A-NEXT: ds_cmpst_rtn_b32 v4, v1, v3, v4 +; GFX90A-NEXT: v_mov_b32_e32 v4, v3 +; GFX90A-NEXT: v_lshrrev_b32_sdwa v3, v0, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD +; GFX90A-NEXT: v_add_f32_e32 v3, -4.0, v3 +; GFX90A-NEXT: v_bfe_u32 v5, v3, 16, 1 +; GFX90A-NEXT: v_or_b32_e32 v6, 0x400000, v3 +; GFX90A-NEXT: v_add3_u32 v5, v5, v3, s6 +; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v3, v3 +; GFX90A-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc +; GFX90A-NEXT: v_lshlrev_b32_sdwa v3, v0, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1 +; GFX90A-NEXT: v_and_or_b32 v3, v4, v2, v3 +; GFX90A-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3 ; GFX90A-NEXT: s_waitcnt lgkmcnt(0) -; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3 +; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4 ; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5] -; GFX90A-NEXT: v_mov_b32_e32 v3, v4 ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX90A-NEXT: s_cbranch_execnz .LBB16_1 ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end @@ -5065,20 +5077,20 @@ define void @local_atomic_fsub_noret_bf16(ptr addrspace(3) %ptr) nounwind { ; GFX908-NEXT: .LBB16_1: ; %atomicrmw.start ; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX908-NEXT: s_waitcnt lgkmcnt(0) -; GFX908-NEXT: v_lshrrev_b32_sdwa v4, v0, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD -; GFX908-NEXT: v_add_f32_e32 v4, -4.0, v4 -; GFX908-NEXT: v_bfe_u32 v5, v4, 16, 1 -; GFX908-NEXT: v_or_b32_e32 v6, 0x400000, v4 -; GFX908-NEXT: v_add3_u32 v5, v5, v4, s6 -; GFX908-NEXT: v_cmp_u_f32_e32 vcc, v4, v4 -; GFX908-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc -; GFX908-NEXT: v_lshlrev_b32_sdwa v4, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1 -; GFX908-NEXT: v_and_or_b32 v4, v3, v2, v4 -; GFX908-NEXT: ds_cmpst_rtn_b32 v4, v1, v3, v4 +; GFX908-NEXT: v_mov_b32_e32 v4, v3 +; GFX908-NEXT: v_lshrrev_b32_sdwa v3, v0, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD +; GFX908-NEXT: v_add_f32_e32 v3, -4.0, v3 +; GFX908-NEXT: v_bfe_u32 v5, v3, 16, 1 +; GFX908-NEXT: v_or_b32_e32 v6, 0x400000, v3 +; GFX908-NEXT: v_add3_u32 v5, v5, v3, s6 +; GFX908-NEXT: v_cmp_u_f32_e32 vcc, v3, v3 +; GFX908-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc +; GFX908-NEXT: v_lshlrev_b32_sdwa v3, v0, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1 +; GFX908-NEXT: v_and_or_b32 v3, v4, v2, v3 +; GFX908-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3 ; GFX908-NEXT: s_waitcnt lgkmcnt(0) -; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3 +; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4 ; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5] -; GFX908-NEXT: v_mov_b32_e32 v3, v4 ; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX908-NEXT: s_cbranch_execnz .LBB16_1 ; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end @@ -5100,23 +5112,23 @@ define void @local_atomic_fsub_noret_bf16(ptr addrspace(3) %ptr) nounwind { ; GFX8-NEXT: .LBB16_1: ; %atomicrmw.start ; GFX8-NEXT: ; =>This Inner Loop 
Header: Depth=1 ; GFX8-NEXT: s_waitcnt lgkmcnt(0) -; GFX8-NEXT: v_lshrrev_b32_sdwa v4, v0, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD -; GFX8-NEXT: v_add_f32_e32 v4, -4.0, v4 -; GFX8-NEXT: v_bfe_u32 v6, v4, 16, 1 -; GFX8-NEXT: v_add_u32_e32 v6, vcc, v6, v4 +; GFX8-NEXT: v_mov_b32_e32 v4, v3 +; GFX8-NEXT: v_lshrrev_b32_sdwa v3, v0, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD +; GFX8-NEXT: v_add_f32_e32 v3, -4.0, v3 +; GFX8-NEXT: v_bfe_u32 v6, v3, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v6, vcc, v6, v3 ; GFX8-NEXT: v_add_u32_e32 v6, vcc, 0x7fff, v6 -; GFX8-NEXT: v_or_b32_e32 v7, 0x400000, v4 -; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v4, v4 -; GFX8-NEXT: v_cndmask_b32_e32 v4, v6, v7, vcc -; GFX8-NEXT: v_and_b32_e32 v5, v3, v2 -; GFX8-NEXT: v_lshlrev_b32_sdwa v4, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1 -; GFX8-NEXT: v_or_b32_e32 v4, v5, v4 -; GFX8-NEXT: ds_cmpst_rtn_b32 v4, v1, v3, v4 -; GFX8-NEXT: s_waitcnt lgkmcnt(0) -; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3 -; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5] -; GFX8-NEXT: v_mov_b32_e32 v3, v4 -; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5] +; GFX8-NEXT: v_or_b32_e32 v7, 0x400000, v3 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v3, v3 +; GFX8-NEXT: v_cndmask_b32_e32 v3, v6, v7, vcc +; GFX8-NEXT: v_and_b32_e32 v5, v4, v2 +; GFX8-NEXT: v_lshlrev_b32_sdwa v3, v0, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1 +; GFX8-NEXT: v_or_b32_e32 v3, v5, v3 +; GFX8-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3 +; GFX8-NEXT: s_waitcnt lgkmcnt(0) +; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4 +; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5] +; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX8-NEXT: s_cbranch_execnz .LBB16_1 ; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end ; GFX8-NEXT: s_or_b64 exec, exec, s[4:5] @@ -5136,18 +5148,18 @@ define void @local_atomic_fsub_noret_bf16(ptr addrspace(3) %ptr) nounwind { ; GFX7-NEXT: .LBB16_1: ; %atomicrmw.start ; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX7-NEXT: s_waitcnt lgkmcnt(0) -; GFX7-NEXT: v_lshrrev_b32_e32 v4, v0, v3 -; GFX7-NEXT: v_lshlrev_b32_e32 v4, 16, v4 -; GFX7-NEXT: v_add_f32_e32 v4, -4.0, v4 -; GFX7-NEXT: v_lshrrev_b32_e32 v4, 16, v4 -; GFX7-NEXT: v_and_b32_e32 v5, v3, v2 -; GFX7-NEXT: v_lshlrev_b32_e32 v4, v0, v4 -; GFX7-NEXT: v_or_b32_e32 v4, v5, v4 -; GFX7-NEXT: ds_cmpst_rtn_b32 v4, v1, v3, v4 +; GFX7-NEXT: v_mov_b32_e32 v4, v3 +; GFX7-NEXT: v_lshrrev_b32_e32 v3, v0, v4 +; GFX7-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; GFX7-NEXT: v_add_f32_e32 v3, -4.0, v3 +; GFX7-NEXT: v_lshrrev_b32_e32 v3, 16, v3 +; GFX7-NEXT: v_and_b32_e32 v5, v4, v2 +; GFX7-NEXT: v_lshlrev_b32_e32 v3, v0, v3 +; GFX7-NEXT: v_or_b32_e32 v3, v5, v3 +; GFX7-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3 ; GFX7-NEXT: s_waitcnt lgkmcnt(0) -; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3 +; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4 ; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5] -; GFX7-NEXT: v_mov_b32_e32 v3, v4 ; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX7-NEXT: s_cbranch_execnz .LBB16_1 ; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end @@ -5168,18 +5180,18 @@ define void @local_atomic_fsub_noret_bf16(ptr addrspace(3) %ptr) nounwind { ; GFX6-NEXT: .LBB16_1: ; %atomicrmw.start ; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX6-NEXT: s_waitcnt lgkmcnt(0) -; GFX6-NEXT: v_lshrrev_b32_e32 v4, v0, v3 -; GFX6-NEXT: v_lshlrev_b32_e32 v4, 16, v4 -; GFX6-NEXT: v_add_f32_e32 v4, -4.0, v4 -; GFX6-NEXT: v_lshrrev_b32_e32 v4, 16, v4 -; GFX6-NEXT: v_and_b32_e32 v5, v3, v2 -; GFX6-NEXT: v_lshlrev_b32_e32 v4, v0, v4 -; 
GFX6-NEXT: v_or_b32_e32 v4, v5, v4 -; GFX6-NEXT: ds_cmpst_rtn_b32 v4, v1, v3, v4 +; GFX6-NEXT: v_mov_b32_e32 v4, v3 +; GFX6-NEXT: v_lshrrev_b32_e32 v3, v0, v4 +; GFX6-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; GFX6-NEXT: v_add_f32_e32 v3, -4.0, v3 +; GFX6-NEXT: v_lshrrev_b32_e32 v3, 16, v3 +; GFX6-NEXT: v_and_b32_e32 v5, v4, v2 +; GFX6-NEXT: v_lshlrev_b32_e32 v3, v0, v3 +; GFX6-NEXT: v_or_b32_e32 v3, v5, v3 +; GFX6-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3 ; GFX6-NEXT: s_waitcnt lgkmcnt(0) -; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3 +; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4 ; GFX6-NEXT: s_or_b64 s[4:5], vcc, s[4:5] -; GFX6-NEXT: v_mov_b32_e32 v3, v4 ; GFX6-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX6-NEXT: s_cbranch_execnz .LBB16_1 ; GFX6-NEXT: ; %bb.2: ; %atomicrmw.end @@ -5210,29 +5222,30 @@ define void @local_atomic_fsub_noret_bf16__offset(ptr addrspace(3) %ptr) nounwin ; GFX12-TRUE16-NEXT: .LBB17_1: ; %atomicrmw.start ; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0 -; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v4, v1, v3 +; GFX12-TRUE16-NEXT: v_mov_b32_e32 v4, v3 ; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 16, v4 -; GFX12-TRUE16-NEXT: v_add_f32_e32 v4, -4.0, v4 -; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3) -; GFX12-TRUE16-NEXT: v_bfe_u32 v5, v4, 16, 1 -; GFX12-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v4 -; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4 -; GFX12-TRUE16-NEXT: v_add3_u32 v5, v5, v4, 0x7fff +; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v1, v4 +; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX12-TRUE16-NEXT: v_add_f32_e32 v3, -4.0, v3 +; GFX12-TRUE16-NEXT: v_bfe_u32 v5, v3, 16, 1 +; GFX12-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v3 +; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3 +; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1) +; GFX12-TRUE16-NEXT: v_add3_u32 v5, v5, v3, 0x7fff ; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd -; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2) -; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo +; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo ; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.h, 0 -; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.l, v4.h -; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v4, v1, v5 -; GFX12-TRUE16-NEXT: v_and_or_b32 v4, v3, v2, v4 +; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.l, v3.h +; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v1, v5 +; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX12-TRUE16-NEXT: v_and_or_b32 v3, v4, v2, v3 ; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0 -; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3 +; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4 ; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0 ; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SE -; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3 -; GFX12-TRUE16-NEXT: v_mov_b32_e32 v3, v4 +; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4 ; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe ; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0 ; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe @@ -5263,28 +5276,29 @@ define void 
@local_atomic_fsub_noret_bf16__offset(ptr addrspace(3) %ptr) nounwin ; GFX12-FAKE16-NEXT: .LBB17_1: ; %atomicrmw.start ; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0 -; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v4, v1, v3 +; GFX12-FAKE16-NEXT: v_mov_b32_e32 v4, v3 ; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v4 -; GFX12-FAKE16-NEXT: v_add_f32_e32 v4, -4.0, v4 -; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3) -; GFX12-FAKE16-NEXT: v_bfe_u32 v5, v4, 16, 1 -; GFX12-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v4 -; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4 -; GFX12-FAKE16-NEXT: v_add3_u32 v5, v5, v4, 0x7fff -; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd +; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v1, v4 +; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3 ; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo -; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v4 +; GFX12-FAKE16-NEXT: v_add_f32_e32 v3, -4.0, v3 +; GFX12-FAKE16-NEXT: v_bfe_u32 v5, v3, 16, 1 +; GFX12-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v3 +; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3 +; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1) +; GFX12-FAKE16-NEXT: v_add3_u32 v5, v5, v3, 0x7fff +; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd +; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo ; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, v1, v4 -; GFX12-FAKE16-NEXT: v_and_or_b32 v4, v3, v2, v4 +; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3 +; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v1, v3 +; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX12-FAKE16-NEXT: v_and_or_b32 v3, v4, v2, v3 ; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0 -; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3 +; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4 ; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0 ; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SE -; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3 -; GFX12-FAKE16-NEXT: v_mov_b32_e32 v3, v4 +; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4 ; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe ; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0 ; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe @@ -5311,22 +5325,22 @@ define void @local_atomic_fsub_noret_bf16__offset(ptr addrspace(3) %ptr) nounwin ; GFX942-NEXT: .LBB17_1: ; %atomicrmw.start ; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX942-NEXT: s_waitcnt lgkmcnt(0) -; GFX942-NEXT: v_lshrrev_b32_sdwa v4, v1, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD +; GFX942-NEXT: v_mov_b32_e32 v4, v3 +; GFX942-NEXT: v_lshrrev_b32_sdwa v3, v1, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; GFX942-NEXT: s_nop 0 -; GFX942-NEXT: v_add_f32_e32 v4, -4.0, v4 -; GFX942-NEXT: v_bfe_u32 v5, v4, 16, 1 -; GFX942-NEXT: v_or_b32_e32 v6, 0x400000, v4 -; GFX942-NEXT: v_add3_u32 v5, v5, v4, s2 -; GFX942-NEXT: v_cmp_u_f32_e32 vcc, v4, v4 +; GFX942-NEXT: v_add_f32_e32 v3, -4.0, v3 +; GFX942-NEXT: v_bfe_u32 v5, v3, 16, 1 +; GFX942-NEXT: v_or_b32_e32 v6, 0x400000, v3 +; GFX942-NEXT: v_add3_u32 v5, v5, v3, s2 +; GFX942-NEXT: v_cmp_u_f32_e32 vcc, v3, v3 ; GFX942-NEXT: s_nop 1 -; GFX942-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc -; GFX942-NEXT: 
v_lshlrev_b32_sdwa v4, v1, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1 -; GFX942-NEXT: v_and_or_b32 v4, v3, v2, v4 -; GFX942-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4 +; GFX942-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc +; GFX942-NEXT: v_lshlrev_b32_sdwa v3, v1, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1 +; GFX942-NEXT: v_and_or_b32 v3, v4, v2, v3 +; GFX942-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3 ; GFX942-NEXT: s_waitcnt lgkmcnt(0) -; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3 +; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4 ; GFX942-NEXT: s_or_b64 s[0:1], vcc, s[0:1] -; GFX942-NEXT: v_mov_b32_e32 v3, v4 ; GFX942-NEXT: s_andn2_b64 exec, exec, s[0:1] ; GFX942-NEXT: s_cbranch_execnz .LBB17_1 ; GFX942-NEXT: ; %bb.2: ; %atomicrmw.end @@ -5350,28 +5364,29 @@ define void @local_atomic_fsub_noret_bf16__offset(ptr addrspace(3) %ptr) nounwin ; GFX11-TRUE16-NEXT: .LBB17_1: ; %atomicrmw.start ; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0) -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v4, v1, v3 +; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v3 ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 16, v4 -; GFX11-TRUE16-NEXT: v_add_f32_e32 v4, -4.0, v4 -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3) -; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v4, 16, 1 -; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v4 -; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4 -; GFX11-TRUE16-NEXT: v_add3_u32 v5, v5, v4, 0x7fff -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2) -; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, 0 -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v4.h +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v1, v4 +; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3 ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v4, v1, v5 -; GFX11-TRUE16-NEXT: v_and_or_b32 v4, v3, v2, v4 +; GFX11-TRUE16-NEXT: v_add_f32_e32 v3, -4.0, v3 +; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v3, 16, 1 +; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v3 +; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3 +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX11-TRUE16-NEXT: v_add3_u32 v5, v5, v3, 0x7fff +; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, 0 +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v3.h +; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v1, v5 +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-TRUE16-NEXT: v_and_or_b32 v3, v4, v2, v3 ; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0 -; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3 +; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4 ; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0) ; GFX11-TRUE16-NEXT: buffer_gl0_inv -; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3 -; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v4 +; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4 ; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0 ; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0 @@ -5397,27 +5412,28 @@ define void @local_atomic_fsub_noret_bf16__offset(ptr addrspace(3) %ptr) nounwin ; 
GFX11-FAKE16-NEXT: .LBB17_1: ; %atomicrmw.start ; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0) -; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, v1, v3 +; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v3 ; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v4 -; GFX11-FAKE16-NEXT: v_add_f32_e32 v4, -4.0, v4 -; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3) -; GFX11-FAKE16-NEXT: v_bfe_u32 v5, v4, 16, 1 -; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v4 -; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4 -; GFX11-FAKE16-NEXT: v_add3_u32 v5, v5, v4, 0x7fff +; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v1, v4 +; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3 ; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo -; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v4 +; GFX11-FAKE16-NEXT: v_add_f32_e32 v3, -4.0, v3 +; GFX11-FAKE16-NEXT: v_bfe_u32 v5, v3, 16, 1 +; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v3 +; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3 +; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX11-FAKE16-NEXT: v_add3_u32 v5, v5, v3, 0x7fff +; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo ; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, v1, v4 -; GFX11-FAKE16-NEXT: v_and_or_b32 v4, v3, v2, v4 +; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3 +; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v1, v3 +; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-FAKE16-NEXT: v_and_or_b32 v3, v4, v2, v3 ; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0 -; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3 +; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4 ; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0) ; GFX11-FAKE16-NEXT: buffer_gl0_inv -; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3 -; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v4 +; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4 ; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0 ; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0 @@ -5440,21 +5456,21 @@ define void @local_atomic_fsub_noret_bf16__offset(ptr addrspace(3) %ptr) nounwin ; GFX10-NEXT: .LBB17_1: ; %atomicrmw.start ; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX10-NEXT: s_waitcnt lgkmcnt(0) -; GFX10-NEXT: v_lshrrev_b32_sdwa v4, v1, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD -; GFX10-NEXT: v_add_f32_e32 v4, -4.0, v4 -; GFX10-NEXT: v_bfe_u32 v5, v4, 16, 1 -; GFX10-NEXT: v_or_b32_e32 v6, 0x400000, v4 -; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4 -; GFX10-NEXT: v_add3_u32 v5, v5, v4, 0x7fff -; GFX10-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo -; GFX10-NEXT: v_lshlrev_b32_sdwa v4, v1, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1 -; GFX10-NEXT: v_and_or_b32 v4, v3, v2, v4 +; GFX10-NEXT: v_mov_b32_e32 v4, v3 +; GFX10-NEXT: v_lshrrev_b32_sdwa v3, v1, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD +; GFX10-NEXT: v_add_f32_e32 v3, -4.0, v3 +; GFX10-NEXT: v_bfe_u32 v5, v3, 16, 1 +; GFX10-NEXT: v_or_b32_e32 v6, 0x400000, v3 +; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3 +; GFX10-NEXT: v_add3_u32 v5, v5, v3, 0x7fff +; GFX10-NEXT: v_cndmask_b32_e32 
v3, v5, v6, vcc_lo +; GFX10-NEXT: v_lshlrev_b32_sdwa v3, v1, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1 +; GFX10-NEXT: v_and_or_b32 v3, v4, v2, v3 ; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 -; GFX10-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4 +; GFX10-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3 ; GFX10-NEXT: s_waitcnt lgkmcnt(0) ; GFX10-NEXT: buffer_gl0_inv -; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3 -; GFX10-NEXT: v_mov_b32_e32 v3, v4 +; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4 ; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4 ; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4 ; GFX10-NEXT: s_cbranch_execnz .LBB17_1 @@ -5478,20 +5494,20 @@ define void @local_atomic_fsub_noret_bf16__offset(ptr addrspace(3) %ptr) nounwin ; GFX90A-NEXT: .LBB17_1: ; %atomicrmw.start ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX90A-NEXT: s_waitcnt lgkmcnt(0) -; GFX90A-NEXT: v_lshrrev_b32_sdwa v4, v1, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD -; GFX90A-NEXT: v_add_f32_e32 v4, -4.0, v4 -; GFX90A-NEXT: v_bfe_u32 v5, v4, 16, 1 -; GFX90A-NEXT: v_or_b32_e32 v6, 0x400000, v4 -; GFX90A-NEXT: v_add3_u32 v5, v5, v4, s6 -; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v4, v4 -; GFX90A-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc -; GFX90A-NEXT: v_lshlrev_b32_sdwa v4, v1, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1 -; GFX90A-NEXT: v_and_or_b32 v4, v3, v2, v4 -; GFX90A-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4 +; GFX90A-NEXT: v_mov_b32_e32 v4, v3 +; GFX90A-NEXT: v_lshrrev_b32_sdwa v3, v1, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD +; GFX90A-NEXT: v_add_f32_e32 v3, -4.0, v3 +; GFX90A-NEXT: v_bfe_u32 v5, v3, 16, 1 +; GFX90A-NEXT: v_or_b32_e32 v6, 0x400000, v3 +; GFX90A-NEXT: v_add3_u32 v5, v5, v3, s6 +; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v3, v3 +; GFX90A-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc +; GFX90A-NEXT: v_lshlrev_b32_sdwa v3, v1, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1 +; GFX90A-NEXT: v_and_or_b32 v3, v4, v2, v3 +; GFX90A-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3 ; GFX90A-NEXT: s_waitcnt lgkmcnt(0) -; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3 +; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4 ; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5] -; GFX90A-NEXT: v_mov_b32_e32 v3, v4 ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX90A-NEXT: s_cbranch_execnz .LBB17_1 ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end @@ -5514,20 +5530,20 @@ define void @local_atomic_fsub_noret_bf16__offset(ptr addrspace(3) %ptr) nounwin ; GFX908-NEXT: .LBB17_1: ; %atomicrmw.start ; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX908-NEXT: s_waitcnt lgkmcnt(0) -; GFX908-NEXT: v_lshrrev_b32_sdwa v4, v1, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD -; GFX908-NEXT: v_add_f32_e32 v4, -4.0, v4 -; GFX908-NEXT: v_bfe_u32 v5, v4, 16, 1 -; GFX908-NEXT: v_or_b32_e32 v6, 0x400000, v4 -; GFX908-NEXT: v_add3_u32 v5, v5, v4, s6 -; GFX908-NEXT: v_cmp_u_f32_e32 vcc, v4, v4 -; GFX908-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc -; GFX908-NEXT: v_lshlrev_b32_sdwa v4, v1, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1 -; GFX908-NEXT: v_and_or_b32 v4, v3, v2, v4 -; GFX908-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4 +; GFX908-NEXT: v_mov_b32_e32 v4, v3 +; GFX908-NEXT: v_lshrrev_b32_sdwa v3, v1, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD +; GFX908-NEXT: v_add_f32_e32 v3, -4.0, v3 +; GFX908-NEXT: v_bfe_u32 v5, v3, 16, 1 +; GFX908-NEXT: v_or_b32_e32 v6, 0x400000, v3 +; GFX908-NEXT: v_add3_u32 v5, v5, v3, s6 +; 
GFX908-NEXT: v_cmp_u_f32_e32 vcc, v3, v3 +; GFX908-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc +; GFX908-NEXT: v_lshlrev_b32_sdwa v3, v1, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1 +; GFX908-NEXT: v_and_or_b32 v3, v4, v2, v3 +; GFX908-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3 ; GFX908-NEXT: s_waitcnt lgkmcnt(0) -; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3 +; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4 ; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5] -; GFX908-NEXT: v_mov_b32_e32 v3, v4 ; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX908-NEXT: s_cbranch_execnz .LBB17_1 ; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end @@ -5550,22 +5566,22 @@ define void @local_atomic_fsub_noret_bf16__offset(ptr addrspace(3) %ptr) nounwin ; GFX8-NEXT: .LBB17_1: ; %atomicrmw.start ; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX8-NEXT: s_waitcnt lgkmcnt(0) -; GFX8-NEXT: v_lshrrev_b32_sdwa v4, v1, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD -; GFX8-NEXT: v_add_f32_e32 v4, -4.0, v4 -; GFX8-NEXT: v_bfe_u32 v6, v4, 16, 1 -; GFX8-NEXT: v_add_u32_e32 v6, vcc, v6, v4 +; GFX8-NEXT: v_mov_b32_e32 v4, v3 +; GFX8-NEXT: v_lshrrev_b32_sdwa v3, v1, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD +; GFX8-NEXT: v_add_f32_e32 v3, -4.0, v3 +; GFX8-NEXT: v_bfe_u32 v6, v3, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v6, vcc, v6, v3 ; GFX8-NEXT: v_add_u32_e32 v6, vcc, 0x7fff, v6 -; GFX8-NEXT: v_or_b32_e32 v7, 0x400000, v4 -; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v4, v4 -; GFX8-NEXT: v_cndmask_b32_e32 v4, v6, v7, vcc -; GFX8-NEXT: v_and_b32_e32 v5, v3, v2 -; GFX8-NEXT: v_lshlrev_b32_sdwa v4, v1, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1 -; GFX8-NEXT: v_or_b32_e32 v4, v5, v4 -; GFX8-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4 +; GFX8-NEXT: v_or_b32_e32 v7, 0x400000, v3 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v3, v3 +; GFX8-NEXT: v_cndmask_b32_e32 v3, v6, v7, vcc +; GFX8-NEXT: v_and_b32_e32 v5, v4, v2 +; GFX8-NEXT: v_lshlrev_b32_sdwa v3, v1, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1 +; GFX8-NEXT: v_or_b32_e32 v3, v5, v3 +; GFX8-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3 ; GFX8-NEXT: s_waitcnt lgkmcnt(0) -; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3 +; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4 ; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5] -; GFX8-NEXT: v_mov_b32_e32 v3, v4 ; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX8-NEXT: s_cbranch_execnz .LBB17_1 ; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end @@ -5587,18 +5603,18 @@ define void @local_atomic_fsub_noret_bf16__offset(ptr addrspace(3) %ptr) nounwin ; GFX7-NEXT: .LBB17_1: ; %atomicrmw.start ; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX7-NEXT: s_waitcnt lgkmcnt(0) -; GFX7-NEXT: v_lshrrev_b32_e32 v4, v1, v3 -; GFX7-NEXT: v_lshlrev_b32_e32 v4, 16, v4 -; GFX7-NEXT: v_add_f32_e32 v4, -4.0, v4 -; GFX7-NEXT: v_lshrrev_b32_e32 v4, 16, v4 -; GFX7-NEXT: v_and_b32_e32 v5, v3, v2 -; GFX7-NEXT: v_lshlrev_b32_e32 v4, v1, v4 -; GFX7-NEXT: v_or_b32_e32 v4, v5, v4 -; GFX7-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4 +; GFX7-NEXT: v_mov_b32_e32 v4, v3 +; GFX7-NEXT: v_lshrrev_b32_e32 v3, v1, v4 +; GFX7-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; GFX7-NEXT: v_add_f32_e32 v3, -4.0, v3 +; GFX7-NEXT: v_lshrrev_b32_e32 v3, 16, v3 +; GFX7-NEXT: v_and_b32_e32 v5, v4, v2 +; GFX7-NEXT: v_lshlrev_b32_e32 v3, v1, v3 +; GFX7-NEXT: v_or_b32_e32 v3, v5, v3 +; GFX7-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3 ; GFX7-NEXT: s_waitcnt lgkmcnt(0) -; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3 +; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4 ; GFX7-NEXT: s_or_b64 s[4:5], 
vcc, s[4:5] -; GFX7-NEXT: v_mov_b32_e32 v3, v4 ; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX7-NEXT: s_cbranch_execnz .LBB17_1 ; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end @@ -5620,18 +5636,18 @@ define void @local_atomic_fsub_noret_bf16__offset(ptr addrspace(3) %ptr) nounwin ; GFX6-NEXT: .LBB17_1: ; %atomicrmw.start ; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX6-NEXT: s_waitcnt lgkmcnt(0) -; GFX6-NEXT: v_lshrrev_b32_e32 v4, v1, v3 -; GFX6-NEXT: v_lshlrev_b32_e32 v4, 16, v4 -; GFX6-NEXT: v_add_f32_e32 v4, -4.0, v4 -; GFX6-NEXT: v_lshrrev_b32_e32 v4, 16, v4 -; GFX6-NEXT: v_and_b32_e32 v5, v3, v2 -; GFX6-NEXT: v_lshlrev_b32_e32 v4, v1, v4 -; GFX6-NEXT: v_or_b32_e32 v4, v5, v4 -; GFX6-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4 +; GFX6-NEXT: v_mov_b32_e32 v4, v3 +; GFX6-NEXT: v_lshrrev_b32_e32 v3, v1, v4 +; GFX6-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; GFX6-NEXT: v_add_f32_e32 v3, -4.0, v3 +; GFX6-NEXT: v_lshrrev_b32_e32 v3, 16, v3 +; GFX6-NEXT: v_and_b32_e32 v5, v4, v2 +; GFX6-NEXT: v_lshlrev_b32_e32 v3, v1, v3 +; GFX6-NEXT: v_or_b32_e32 v3, v5, v3 +; GFX6-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3 ; GFX6-NEXT: s_waitcnt lgkmcnt(0) -; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3 +; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4 ; GFX6-NEXT: s_or_b64 s[4:5], vcc, s[4:5] -; GFX6-NEXT: v_mov_b32_e32 v3, v4 ; GFX6-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX6-NEXT: s_cbranch_execnz .LBB17_1 ; GFX6-NEXT: ; %bb.2: ; %atomicrmw.end @@ -6031,26 +6047,27 @@ define void @local_atomic_fsub_noret_bf16__offset__align4(ptr addrspace(3) %ptr) ; GFX12-TRUE16-NEXT: .LBB19_1: ; %atomicrmw.start ; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0 -; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v1 +; GFX12-TRUE16-NEXT: v_mov_b32_e32 v2, v1 ; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX12-TRUE16-NEXT: v_add_f32_e32 v2, -4.0, v2 -; GFX12-TRUE16-NEXT: v_bfe_u32 v3, v2, 16, 1 -; GFX12-TRUE16-NEXT: v_or_b32_e32 v4, 0x400000, v2 -; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2 -; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1) -; GFX12-TRUE16-NEXT: v_add3_u32 v3, v3, v2, 0x7fff +; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 16, v2 +; GFX12-TRUE16-NEXT: v_add_f32_e32 v1, -4.0, v1 +; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3) +; GFX12-TRUE16-NEXT: v_bfe_u32 v3, v1, 16, 1 +; GFX12-TRUE16-NEXT: v_or_b32_e32 v4, 0x400000, v1 +; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1 +; GFX12-TRUE16-NEXT: v_add3_u32 v3, v3, v1, 0x7fff ; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd -; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v2, v3, v4, vcc_lo +; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2) +; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc_lo ; GFX12-TRUE16-NEXT: v_mov_b16_e32 v3.h, 0 -; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX12-TRUE16-NEXT: v_mov_b16_e32 v3.l, v2.h -; GFX12-TRUE16-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v3 +; GFX12-TRUE16-NEXT: v_mov_b16_e32 v3.l, v1.h +; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX12-TRUE16-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v3 ; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0 -; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v1 offset:65534 +; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:65534 ; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0 ; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SE -; GFX12-TRUE16-NEXT: 
v_cmp_eq_u32_e32 vcc_lo, v2, v1
-; GFX12-TRUE16-NEXT: v_mov_b32_e32 v1, v2
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
 ; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
 ; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
 ; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
@@ -6073,25 +6090,26 @@ define void @local_atomic_fsub_noret_bf16__offset__align4(ptr addrspace(3) %ptr)
 ; GFX12-FAKE16-NEXT: .LBB19_1: ; %atomicrmw.start
 ; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
-; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v1
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v2, v1
 ; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_add_f32_e32 v2, -4.0, v2
-; GFX12-FAKE16-NEXT: v_bfe_u32 v3, v2, 16, 1
-; GFX12-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v2
-; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_add3_u32 v3, v3, v2, 0x7fff
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v1, 16, v2
+; GFX12-FAKE16-NEXT: v_add_f32_e32 v1, -4.0, v1
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX12-FAKE16-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v1
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX12-FAKE16-NEXT: v_add3_u32 v3, v3, v1, 0x7fff
 ; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v2, v3, v4, vcc_lo
 ; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX12-FAKE16-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v2
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc_lo
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v1
 ; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
-; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v1 offset:65534
+; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:65534
 ; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
 ; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SE
-; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
-; GFX12-FAKE16-NEXT: v_mov_b32_e32 v1, v2
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
 ; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
 ; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
 ; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
@@ -6112,21 +6130,21 @@ define void @local_atomic_fsub_noret_bf16__offset__align4(ptr addrspace(3) %ptr)
 ; GFX942-NEXT: .LBB19_1: ; %atomicrmw.start
 ; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_lshlrev_b32_e32 v2, 16, v1
-; GFX942-NEXT: v_add_f32_e32 v2, -4.0, v2
-; GFX942-NEXT: v_bfe_u32 v3, v2, 16, 1
-; GFX942-NEXT: v_or_b32_e32 v4, 0x400000, v2
-; GFX942-NEXT: v_add3_u32 v3, v3, v2, s2
-; GFX942-NEXT: v_cmp_u_f32_e32 vcc, v2, v2
+; GFX942-NEXT: v_mov_b32_e32 v2, v1
+; GFX942-NEXT: v_lshlrev_b32_e32 v1, 16, v2
+; GFX942-NEXT: v_add_f32_e32 v1, -4.0, v1
+; GFX942-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX942-NEXT: v_or_b32_e32 v4, 0x400000, v1
+; GFX942-NEXT: v_add3_u32 v3, v3, v1, s2
+; GFX942-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
 ; GFX942-NEXT: s_nop 1
-; GFX942-NEXT: v_cndmask_b32_e32 v2, v3, v4, vcc
-; GFX942-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX942-NEXT: v_and_or_b32 v2, v1, s3, v2
-; GFX942-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 offset:65534
+; GFX942-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc
+; GFX942-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX942-NEXT: v_and_or_b32 v1, v2, s3, v1
+; GFX942-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 offset:65534
 ; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
 ; GFX942-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
-; GFX942-NEXT: v_mov_b32_e32 v1, v2
 ; GFX942-NEXT: s_andn2_b64 exec, exec, s[0:1]
 ; GFX942-NEXT: s_cbranch_execnz .LBB19_1
 ; GFX942-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -6142,25 +6160,26 @@ define void @local_atomic_fsub_noret_bf16__offset__align4(ptr addrspace(3) %ptr)
 ; GFX11-TRUE16-NEXT: .LBB19_1: ; %atomicrmw.start
 ; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v2, v1
 ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_add_f32_e32 v2, -4.0, v2
-; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v2, 16, 1
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, 0x400000, v2
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_add3_u32 v3, v3, v2, 0x7fff
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v3, v4, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 16, v2
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, -4.0, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add3_u32 v3, v3, v1, 0x7fff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc_lo
 ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, 0
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v2.h
-; GFX11-TRUE16-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v3
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v1.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v3
 ; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v1 offset:65534
+; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:65534
 ; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX11-TRUE16-NEXT: buffer_gl0_inv
-; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v1, v2
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
 ; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
 ; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
 ; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -6178,24 +6197,25 @@ define void @local_atomic_fsub_noret_bf16__offset__align4(ptr addrspace(3) %ptr)
 ; GFX11-FAKE16-NEXT: .LBB19_1: ; %atomicrmw.start
 ; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v1
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v2, v1
 ; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_add_f32_e32 v2, -4.0, v2
-; GFX11-FAKE16-NEXT: v_bfe_u32 v3, v2, 16, 1
-; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v2
-; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_add3_u32 v3, v3, v2, 0x7fff
-; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v3, v4, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v1, 16, v2
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v1, -4.0, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-FAKE16-NEXT: v_add3_u32 v3, v3, v1, 0x7fff
 ; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX11-FAKE16-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v2
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v1
 ; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v1 offset:65534
+; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:65534
 ; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX11-FAKE16-NEXT: buffer_gl0_inv
-; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
-; GFX11-FAKE16-NEXT: v_mov_b32_e32 v1, v2
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
 ; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
 ; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
 ; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -6212,21 +6232,21 @@ define void @local_atomic_fsub_noret_bf16__offset__align4(ptr addrspace(3) %ptr)
 ; GFX10-NEXT: .LBB19_1: ; %atomicrmw.start
 ; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX10-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-NEXT: v_lshlrev_b32_e32 v2, 16, v1
-; GFX10-NEXT: v_add_f32_e32 v2, -4.0, v2
-; GFX10-NEXT: v_bfe_u32 v3, v2, 16, 1
-; GFX10-NEXT: v_or_b32_e32 v4, 0x400000, v2
-; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX10-NEXT: v_add3_u32 v3, v3, v2, 0x7fff
-; GFX10-NEXT: v_cndmask_b32_e32 v2, v3, v4, vcc_lo
-; GFX10-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX10-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v2
+; GFX10-NEXT: v_mov_b32_e32 v2, v1
+; GFX10-NEXT: v_lshlrev_b32_e32 v1, 16, v2
+; GFX10-NEXT: v_add_f32_e32 v1, -4.0, v1
+; GFX10-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX10-NEXT: v_or_b32_e32 v4, 0x400000, v1
+; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX10-NEXT: v_add3_u32 v3, v3, v1, 0x7fff
+; GFX10-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc_lo
+; GFX10-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX10-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v1
 ; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 offset:65534
+; GFX10-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 offset:65534
 ; GFX10-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
-; GFX10-NEXT: v_mov_b32_e32 v1, v2
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
 ; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4
 ; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4
 ; GFX10-NEXT: s_cbranch_execnz .LBB19_1
@@ -6244,20 +6264,20 @@ define void @local_atomic_fsub_noret_bf16__offset__align4(ptr addrspace(3) %ptr)
 ; GFX90A-NEXT: .LBB19_1: ; %atomicrmw.start
 ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_lshlrev_b32_e32 v2, 16, v1
-; GFX90A-NEXT: v_add_f32_e32 v2, -4.0, v2
-; GFX90A-NEXT: v_bfe_u32 v3, v2, 16, 1
-; GFX90A-NEXT: v_or_b32_e32 v4, 0x400000, v2
-; GFX90A-NEXT: v_add3_u32 v3, v3, v2, s6
-; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v2, v2
-; GFX90A-NEXT: v_cndmask_b32_e32 v2, v3, v4, vcc
-; GFX90A-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX90A-NEXT: v_and_or_b32 v2, v1, s7, v2
-; GFX90A-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 offset:65534
+; GFX90A-NEXT: v_mov_b32_e32 v2, v1
+; GFX90A-NEXT: v_lshlrev_b32_e32 v1, 16, v2
+; GFX90A-NEXT: v_add_f32_e32 v1, -4.0, v1
+; GFX90A-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX90A-NEXT: v_or_b32_e32 v4, 0x400000, v1
+; GFX90A-NEXT: v_add3_u32 v3, v3, v1, s6
+; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
+; GFX90A-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc
+; GFX90A-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX90A-NEXT: v_and_or_b32 v1, v2, s7, v1
+; GFX90A-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 offset:65534
 ; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
 ; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX90A-NEXT: v_mov_b32_e32 v1, v2
 ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5]
 ; GFX90A-NEXT: s_cbranch_execnz .LBB19_1
 ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -6274,20 +6294,20 @@ define void @local_atomic_fsub_noret_bf16__offset__align4(ptr addrspace(3) %ptr)
 ; GFX908-NEXT: .LBB19_1: ; %atomicrmw.start
 ; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_lshlrev_b32_e32 v2, 16, v1
-; GFX908-NEXT: v_add_f32_e32 v2, -4.0, v2
-; GFX908-NEXT: v_bfe_u32 v3, v2, 16, 1
-; GFX908-NEXT: v_or_b32_e32 v4, 0x400000, v2
-; GFX908-NEXT: v_add3_u32 v3, v3, v2, s6
-; GFX908-NEXT: v_cmp_u_f32_e32 vcc, v2, v2
-; GFX908-NEXT: v_cndmask_b32_e32 v2, v3, v4, vcc
-; GFX908-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX908-NEXT: v_and_or_b32 v2, v1, s7, v2
-; GFX908-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 offset:65534
+; GFX908-NEXT: v_mov_b32_e32 v2, v1
+; GFX908-NEXT: v_lshlrev_b32_e32 v1, 16, v2
+; GFX908-NEXT: v_add_f32_e32 v1, -4.0, v1
+; GFX908-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX908-NEXT: v_or_b32_e32 v4, 0x400000, v1
+; GFX908-NEXT: v_add3_u32 v3, v3, v1, s6
+; GFX908-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
+; GFX908-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc
+; GFX908-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX908-NEXT: v_and_or_b32 v1, v2, s7, v1
+; GFX908-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 offset:65534
 ; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
 ; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX908-NEXT: v_mov_b32_e32 v1, v2
 ; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5]
 ; GFX908-NEXT: s_cbranch_execnz .LBB19_1
 ; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -6303,21 +6323,21 @@ define void @local_atomic_fsub_noret_bf16__offset__align4(ptr addrspace(3) %ptr)
 ; GFX8-NEXT: .LBB19_1: ; %atomicrmw.start
 ; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_lshlrev_b32_e32 v2, 16, v1
-; GFX8-NEXT: v_add_f32_e32 v2, -4.0, v2
-; GFX8-NEXT: v_bfe_u32 v4, v2, 16, 1
-; GFX8-NEXT: v_add_u32_e32 v4, vcc, v4, v2
+; GFX8-NEXT: v_mov_b32_e32 v2, v1
+; GFX8-NEXT: v_lshlrev_b32_e32 v1, 16, v2
+; GFX8-NEXT: v_add_f32_e32 v1, -4.0, v1
+; GFX8-NEXT: v_bfe_u32 v4, v1, 16, 1
+; GFX8-NEXT: v_add_u32_e32 v4, vcc, v4, v1
 ; GFX8-NEXT: v_add_u32_e32 v4, vcc, 0x7fff, v4
-; GFX8-NEXT: v_or_b32_e32 v5, 0x400000, v2
-; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v2, v2
-; GFX8-NEXT: v_and_b32_e32 v3, 0xffff0000, v1
-; GFX8-NEXT: v_cndmask_b32_e32 v2, v4, v5, vcc
-; GFX8-NEXT: v_or_b32_sdwa v2, v3, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
-; GFX8-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 offset:65534
+; GFX8-NEXT: v_or_b32_e32 v5, 0x400000, v1
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
+; GFX8-NEXT: v_and_b32_e32 v3, 0xffff0000, v2
+; GFX8-NEXT: v_cndmask_b32_e32 v1, v4, v5, vcc
+; GFX8-NEXT: v_or_b32_sdwa v1, v3, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 offset:65534
 ; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
 ; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX8-NEXT: v_mov_b32_e32 v1, v2
 ; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
 ; GFX8-NEXT: s_cbranch_execnz .LBB19_1
 ; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -6333,16 +6353,16 @@ define void @local_atomic_fsub_noret_bf16__offset__align4(ptr addrspace(3) %ptr)
 ; GFX7-NEXT: .LBB19_1: ; %atomicrmw.start
 ; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_lshlrev_b32_e32 v2, 16, v1
-; GFX7-NEXT: v_add_f32_e32 v2, -4.0, v2
-; GFX7-NEXT: v_and_b32_e32 v3, 0xffff0000, v1
-; GFX7-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX7-NEXT: v_or_b32_e32 v2, v3, v2
-; GFX7-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 offset:65534
+; GFX7-NEXT: v_mov_b32_e32 v2, v1
+; GFX7-NEXT: v_lshlrev_b32_e32 v1, 16, v2
+; GFX7-NEXT: v_add_f32_e32 v1, -4.0, v1
+; GFX7-NEXT: v_and_b32_e32 v3, 0xffff0000, v2
+; GFX7-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX7-NEXT: v_or_b32_e32 v1, v3, v1
+; GFX7-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 offset:65534
 ; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
 ; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX7-NEXT: v_mov_b32_e32 v1, v2
 ; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5]
 ; GFX7-NEXT: s_cbranch_execnz .LBB19_1
 ; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -6359,16 +6379,16 @@ define void @local_atomic_fsub_noret_bf16__offset__align4(ptr addrspace(3) %ptr)
 ; GFX6-NEXT: .LBB19_1: ; %atomicrmw.start
 ; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_lshlrev_b32_e32 v2, 16, v1
-; GFX6-NEXT: v_add_f32_e32 v2, -4.0, v2
-; GFX6-NEXT: v_and_b32_e32 v3, 0xffff0000, v1
-; GFX6-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX6-NEXT: v_or_b32_e32 v2, v3, v2
-; GFX6-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2
+; GFX6-NEXT: v_mov_b32_e32 v2, v1
+; GFX6-NEXT: v_lshlrev_b32_e32 v1, 16, v2
+; GFX6-NEXT: v_add_f32_e32 v1, -4.0, v1
+; GFX6-NEXT: v_and_b32_e32 v3, 0xffff0000, v2
+; GFX6-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX6-NEXT: v_or_b32_e32 v1, v3, v1
+; GFX6-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1
 ; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
 ; GFX6-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX6-NEXT: v_mov_b32_e32 v1, v2
 ; GFX6-NEXT: s_andn2_b64 exec, exec, s[4:5]
 ; GFX6-NEXT: s_cbranch_execnz .LBB19_1
 ; GFX6-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -6906,13 +6926,14 @@ define void @local_atomic_fsub_noret_v2f16(ptr addrspace(3) %ptr, <2 x half> %va
 ; GFX12-NEXT: .LBB22_1: ; %atomicrmw.start
 ; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX12-NEXT: s_wait_dscnt 0x0
-; GFX12-NEXT: v_pk_add_f16 v3, v2, v1 neg_lo:[0,1] neg_hi:[0,1]
+; GFX12-NEXT: v_mov_b32_e32 v3, v2
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_pk_add_f16 v2, v3, v1 neg_lo:[0,1] neg_hi:[0,1]
 ; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v2
+; GFX12-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v3
 ; GFX12-NEXT: s_wait_dscnt 0x0
 ; GFX12-NEXT: global_inv scope:SCOPE_SE
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v2
-; GFX12-NEXT: v_mov_b32_e32 v2, v3
+; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
 ; GFX12-NEXT: s_wait_alu 0xfffe
 ; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
 ; GFX12-NEXT: s_wait_alu 0xfffe
@@ -6931,12 +6952,12 @@ define void @local_atomic_fsub_noret_v2f16(ptr addrspace(3) %ptr, <2 x half> %va
 ; GFX942-NEXT: .LBB22_1: ; %atomicrmw.start
 ; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_pk_add_f16 v3, v2, v1 neg_lo:[0,1] neg_hi:[0,1]
-; GFX942-NEXT: ds_cmpst_rtn_b32 v3, v0, v2, v3
+; GFX942-NEXT: v_mov_b32_e32 v3, v2
+; GFX942-NEXT: v_pk_add_f16 v2, v3, v1 neg_lo:[0,1] neg_hi:[0,1]
+; GFX942-NEXT: ds_cmpst_rtn_b32 v2, v0, v3, v2
 ; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v3, v2
+; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3
 ; GFX942-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
-; GFX942-NEXT: v_mov_b32_e32 v2, v3
 ; GFX942-NEXT: s_andn2_b64 exec, exec, s[0:1]
 ; GFX942-NEXT: s_cbranch_execnz .LBB22_1
 ; GFX942-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -6951,13 +6972,14 @@ define void @local_atomic_fsub_noret_v2f16(ptr addrspace(3) %ptr, <2 x half> %va
 ; GFX11-NEXT: .LBB22_1: ; %atomicrmw.start
 ; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: v_pk_add_f16 v3, v2, v1 neg_lo:[0,1] neg_hi:[0,1]
+; GFX11-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_pk_add_f16 v2, v3, v1 neg_lo:[0,1] neg_hi:[0,1]
 ; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v2
+; GFX11-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v3
 ; GFX11-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v2
-; GFX11-NEXT: v_mov_b32_e32 v2, v3
+; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
 ; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
 ; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
 ; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -6974,13 +6996,13 @@ define void @local_atomic_fsub_noret_v2f16(ptr addrspace(3) %ptr, <2 x half> %va
 ; GFX10-NEXT: .LBB22_1: ; %atomicrmw.start
 ; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX10-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-NEXT: v_pk_add_f16 v3, v2, v1 neg_lo:[0,1] neg_hi:[0,1]
+; GFX10-NEXT: v_mov_b32_e32 v3, v2
+; GFX10-NEXT: v_pk_add_f16 v2, v3, v1 neg_lo:[0,1] neg_hi:[0,1]
 ; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: ds_cmpst_rtn_b32 v3, v0, v2, v3
+; GFX10-NEXT: ds_cmpst_rtn_b32 v2, v0, v3, v2
 ; GFX10-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v2
-; GFX10-NEXT: v_mov_b32_e32 v2, v3
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
 ; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4
 ; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4
 ; GFX10-NEXT: s_cbranch_execnz .LBB22_1
@@ -6996,12 +7018,12 @@ define void @local_atomic_fsub_noret_v2f16(ptr addrspace(3) %ptr, <2 x half> %va
 ; GFX90A-NEXT: .LBB22_1: ; %atomicrmw.start
 ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_pk_add_f16 v3, v2, v1 neg_lo:[0,1] neg_hi:[0,1]
-; GFX90A-NEXT: ds_cmpst_rtn_b32 v3, v0, v2, v3
+; GFX90A-NEXT: v_mov_b32_e32 v3, v2
+; GFX90A-NEXT: v_pk_add_f16 v2, v3, v1 neg_lo:[0,1] neg_hi:[0,1]
+; GFX90A-NEXT: ds_cmpst_rtn_b32 v2, v0, v3, v2
 ; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v3, v2
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3
 ; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX90A-NEXT: v_mov_b32_e32 v2, v3
 ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5]
 ; GFX90A-NEXT: s_cbranch_execnz .LBB22_1
 ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -7016,12 +7038,12 @@ define void @local_atomic_fsub_noret_v2f16(ptr addrspace(3) %ptr, <2 x half> %va
 ; GFX908-NEXT: .LBB22_1: ; %atomicrmw.start
 ; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_pk_add_f16 v3, v2, v1 neg_lo:[0,1] neg_hi:[0,1]
-; GFX908-NEXT: ds_cmpst_rtn_b32 v3, v0, v2, v3
+; GFX908-NEXT: v_mov_b32_e32 v3, v2
+; GFX908-NEXT: v_pk_add_f16 v2, v3, v1 neg_lo:[0,1] neg_hi:[0,1]
+; GFX908-NEXT: ds_cmpst_rtn_b32 v2, v0, v3, v2
 ; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v3, v2
+; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3
 ; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX908-NEXT: v_mov_b32_e32 v2, v3
 ; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5]
 ; GFX908-NEXT: s_cbranch_execnz .LBB22_1
 ; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -7037,14 +7059,14 @@ define void @local_atomic_fsub_noret_v2f16(ptr addrspace(3) %ptr, <2 x half> %va
 ; GFX8-NEXT: .LBB22_1: ; %atomicrmw.start
 ; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_sub_f16_sdwa v3, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
-; GFX8-NEXT: v_sub_f16_e32 v4, v2, v1
-; GFX8-NEXT: v_or_b32_e32 v3, v4, v3
-; GFX8-NEXT: ds_cmpst_rtn_b32 v3, v0, v2, v3
+; GFX8-NEXT: v_mov_b32_e32 v3, v2
+; GFX8-NEXT: v_sub_f16_sdwa v2, v3, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT: v_sub_f16_e32 v4, v3, v1
+; GFX8-NEXT: v_or_b32_e32 v2, v4, v2
+; GFX8-NEXT: ds_cmpst_rtn_b32 v2, v0, v3, v2
 ; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v3, v2
+; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3
 ; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX8-NEXT: v_mov_b32_e32 v2, v3
 ; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
 ; GFX8-NEXT: s_cbranch_execnz .LBB22_1
 ; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -7149,13 +7171,14 @@ define void @local_atomic_fsub_noret_v2f16__offset(ptr addrspace(3) %ptr, <2 x h
 ; GFX12-NEXT: .LBB23_1: ; %atomicrmw.start
 ; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX12-NEXT: s_wait_dscnt 0x0
-; GFX12-NEXT: v_pk_add_f16 v3, v2, v1 neg_lo:[0,1] neg_hi:[0,1]
+; GFX12-NEXT: v_mov_b32_e32 v3, v2
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_pk_add_f16 v2, v3, v1 neg_lo:[0,1] neg_hi:[0,1]
 ; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v2 offset:65532
+; GFX12-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v3 offset:65532
 ; GFX12-NEXT: s_wait_dscnt 0x0
 ; GFX12-NEXT: global_inv scope:SCOPE_SE
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v2
-; GFX12-NEXT: v_mov_b32_e32 v2, v3
+; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
 ; GFX12-NEXT: s_wait_alu 0xfffe
 ; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
 ; GFX12-NEXT: s_wait_alu 0xfffe
@@ -7174,12 +7197,12 @@ define void @local_atomic_fsub_noret_v2f16__offset(ptr addrspace(3) %ptr, <2 x h
 ; GFX942-NEXT: .LBB23_1: ; %atomicrmw.start
 ; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_pk_add_f16 v3, v2, v1 neg_lo:[0,1] neg_hi:[0,1]
-; GFX942-NEXT: ds_cmpst_rtn_b32 v3, v0, v2, v3 offset:65532
+; GFX942-NEXT: v_mov_b32_e32 v3, v2
+; GFX942-NEXT: v_pk_add_f16 v2, v3, v1 neg_lo:[0,1] neg_hi:[0,1]
+; GFX942-NEXT: ds_cmpst_rtn_b32 v2, v0, v3, v2 offset:65532
 ; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v3, v2
+; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3
 ; GFX942-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
-; GFX942-NEXT: v_mov_b32_e32 v2, v3
 ; GFX942-NEXT: s_andn2_b64 exec, exec, s[0:1]
 ; GFX942-NEXT: s_cbranch_execnz .LBB23_1
 ; GFX942-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -7194,13 +7217,14 @@ define void @local_atomic_fsub_noret_v2f16__offset(ptr addrspace(3) %ptr, <2 x h
 ; GFX11-NEXT: .LBB23_1: ; %atomicrmw.start
 ; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: v_pk_add_f16 v3, v2, v1 neg_lo:[0,1] neg_hi:[0,1]
+; GFX11-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_pk_add_f16 v2, v3, v1 neg_lo:[0,1] neg_hi:[0,1]
 ; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v2 offset:65532
+; GFX11-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v3 offset:65532
 ; GFX11-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v2
-; GFX11-NEXT: v_mov_b32_e32 v2, v3
+; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
 ; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
 ; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
 ; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -7217,13 +7241,13 @@ define void @local_atomic_fsub_noret_v2f16__offset(ptr addrspace(3) %ptr, <2 x h
 ; GFX10-NEXT: .LBB23_1: ; %atomicrmw.start
 ; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX10-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-NEXT: v_pk_add_f16 v3, v2, v1 neg_lo:[0,1] neg_hi:[0,1]
+; GFX10-NEXT: v_mov_b32_e32 v3, v2
+; GFX10-NEXT: v_pk_add_f16 v2, v3, v1 neg_lo:[0,1] neg_hi:[0,1]
 ; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: ds_cmpst_rtn_b32 v3, v0, v2, v3 offset:65532
+; GFX10-NEXT: ds_cmpst_rtn_b32 v2, v0, v3, v2 offset:65532
 ; GFX10-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v2
-; GFX10-NEXT: v_mov_b32_e32 v2, v3
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
 ; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4
 ; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4
 ; GFX10-NEXT: s_cbranch_execnz .LBB23_1
@@ -7239,12 +7263,12 @@ define void @local_atomic_fsub_noret_v2f16__offset(ptr addrspace(3) %ptr, <2 x h
 ; GFX90A-NEXT: .LBB23_1: ; %atomicrmw.start
 ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_pk_add_f16 v3, v2, v1 neg_lo:[0,1] neg_hi:[0,1]
-; GFX90A-NEXT: ds_cmpst_rtn_b32 v3, v0, v2, v3 offset:65532
+; GFX90A-NEXT: v_mov_b32_e32 v3, v2
+; GFX90A-NEXT: v_pk_add_f16 v2, v3, v1 neg_lo:[0,1] neg_hi:[0,1]
+; GFX90A-NEXT: ds_cmpst_rtn_b32 v2, v0, v3, v2 offset:65532
 ; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v3, v2
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3
 ; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX90A-NEXT: v_mov_b32_e32 v2, v3
 ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5]
 ; GFX90A-NEXT: s_cbranch_execnz .LBB23_1
 ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -7259,12 +7283,12 @@ define void @local_atomic_fsub_noret_v2f16__offset(ptr addrspace(3) %ptr, <2 x h
 ; GFX908-NEXT: .LBB23_1: ; %atomicrmw.start
 ; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_pk_add_f16 v3, v2, v1 neg_lo:[0,1] neg_hi:[0,1]
-; GFX908-NEXT: ds_cmpst_rtn_b32 v3, v0, v2, v3 offset:65532
+; GFX908-NEXT: v_mov_b32_e32 v3, v2
+; GFX908-NEXT: v_pk_add_f16 v2, v3, v1 neg_lo:[0,1] neg_hi:[0,1]
+; GFX908-NEXT: ds_cmpst_rtn_b32 v2, v0, v3, v2 offset:65532
 ; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v3, v2
+; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3
 ; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX908-NEXT: v_mov_b32_e32 v2, v3
 ; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5]
 ; GFX908-NEXT: s_cbranch_execnz .LBB23_1
 ; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -7280,14 +7304,14 @@ define void @local_atomic_fsub_noret_v2f16__offset(ptr addrspace(3) %ptr, <2 x h
 ; GFX8-NEXT: .LBB23_1: ; %atomicrmw.start
 ; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_sub_f16_sdwa v3, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
-; GFX8-NEXT: v_sub_f16_e32 v4, v2, v1
-; GFX8-NEXT: v_or_b32_e32 v3, v4, v3
-; GFX8-NEXT: ds_cmpst_rtn_b32 v3, v0, v2, v3 offset:65532
+; GFX8-NEXT: v_mov_b32_e32 v3, v2
+; GFX8-NEXT: v_sub_f16_sdwa v2, v3, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT: v_sub_f16_e32 v4, v3, v1
+; GFX8-NEXT: v_or_b32_e32 v2, v4, v2
+; GFX8-NEXT: ds_cmpst_rtn_b32 v2, v0, v3, v2 offset:65532
 ; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v3, v2
+; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3
 ; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX8-NEXT: v_mov_b32_e32 v2, v3
 ; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
 ; GFX8-NEXT: s_cbranch_execnz .LBB23_1
 ; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -8357,31 +8381,34 @@ define void @local_atomic_fsub_noret_v2bf16(ptr addrspace(3) %ptr, <2 x bfloat>
 ; GFX12-TRUE16-NEXT: .LBB26_1: ; %atomicrmw.start
 ; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
-; GFX12-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v4, v3
 ; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-TRUE16-NEXT: v_dual_sub_f32 v5, v5, v2 :: v_dual_lshlrev_b32 v4, 16, v3
-; GFX12-TRUE16-NEXT: v_sub_f32_e32 v4, v4, v1
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX12-TRUE16-NEXT: v_sub_f32_e32 v5, v5, v2
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v4
 ; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
 ; GFX12-TRUE16-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX12-TRUE16-NEXT: v_bfe_u32 v6, v4, 16, 1
-; GFX12-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v4
-; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX12-TRUE16-NEXT: v_sub_f32_e32 v3, v3, v1
 ; GFX12-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
 ; GFX12-TRUE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX12-TRUE16-NEXT: v_add3_u32 v6, v6, v4, 0x7fff
+; GFX12-TRUE16-NEXT: v_bfe_u32 v6, v3, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v3
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_add3_u32 v6, v6, v3, 0x7fff
 ; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
-; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_3)
-; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v4, v6, v8, vcc_lo
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v3, v6, v8, vcc_lo
 ; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
 ; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
 ; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
-; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.l, v4.h
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.l, v3.h
 ; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
-; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v5, v3
+; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v5, v4
 ; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
 ; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SE
-; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX12-TRUE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
 ; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
 ; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
 ; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
@@ -8406,32 +8433,33 @@ define void @local_atomic_fsub_noret_v2bf16(ptr addrspace(3) %ptr, <2 x bfloat>
 ; GFX12-FAKE16-NEXT: .LBB26_1: ; %atomicrmw.start
 ; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
-; GFX12-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v4, v3
 ; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_dual_sub_f32 v5, v5, v1 :: v_dual_lshlrev_b32 v4, 16, v3
-; GFX12-FAKE16-NEXT: v_sub_f32_e32 v4, v4, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX12-FAKE16-NEXT: v_sub_f32_e32 v5, v5, v1
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v4
 ; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
 ; GFX12-FAKE16-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX12-FAKE16-NEXT: v_bfe_u32 v6, v4, 16, 1
-; GFX12-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v4
+; GFX12-FAKE16-NEXT: v_sub_f32_e32 v3, v3, v2
 ; GFX12-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
 ; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
 ; GFX12-FAKE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX12-FAKE16-NEXT: v_add3_u32 v6, v6, v4, 0x7fff
-; GFX12-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v4, v4
+; GFX12-FAKE16-NEXT: v_bfe_u32 v6, v3, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v3
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v3, v3
 ; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2)
 ; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
+; GFX12-FAKE16-NEXT: v_add3_u32 v6, v6, v3, 0x7fff
 ; GFX12-FAKE16-NEXT: s_wait_alu 0xf1ff
-; GFX12-FAKE16-NEXT: v_cndmask_b32_e64 v4, v6, v8, s0
-; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_perm_b32 v4, v5, v4, 0x7060302
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e64 v3, v6, v8, s0
+; GFX12-FAKE16-NEXT: v_perm_b32 v3, v5, v3, 0x7060302
 ; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
-; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3
+; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4
 ; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
 ; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SE
-; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX12-FAKE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
 ; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
 ; GFX12-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
 ; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
@@ -8454,27 +8482,27 @@ define void @local_atomic_fsub_noret_v2bf16(ptr addrspace(3) %ptr, <2 x bfloat>
 ; GFX942-NEXT: .LBB26_1: ; %atomicrmw.start
 ; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_lshlrev_b32_e32 v4, 16, v3
-; GFX942-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
-; GFX942-NEXT: v_sub_f32_e32 v4, v4, v2
+; GFX942-NEXT: v_mov_b32_e32 v4, v3
+; GFX942-NEXT: v_lshlrev_b32_e32 v3, 16, v4
+; GFX942-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX942-NEXT: v_sub_f32_e32 v3, v3, v2
 ; GFX942-NEXT: v_sub_f32_e32 v5, v5, v1
-; GFX942-NEXT: v_bfe_u32 v6, v4, 16, 1
+; GFX942-NEXT: v_bfe_u32 v6, v3, 16, 1
 ; GFX942-NEXT: v_bfe_u32 v8, v5, 16, 1
-; GFX942-NEXT: v_or_b32_e32 v7, 0x400000, v4
+; GFX942-NEXT: v_or_b32_e32 v7, 0x400000, v3
 ; GFX942-NEXT: v_or_b32_e32 v9, 0x400000, v5
-; GFX942-NEXT: v_add3_u32 v6, v6, v4, s4
+; GFX942-NEXT: v_add3_u32 v6, v6, v3, s4
 ; GFX942-NEXT: v_add3_u32 v8, v8, v5, s4
 ; GFX942-NEXT: v_cmp_u_f32_e32 vcc, v5, v5
-; GFX942-NEXT: v_cmp_u_f32_e64 s[0:1], v4, v4
+; GFX942-NEXT: v_cmp_u_f32_e64 s[0:1], v3, v3
 ; GFX942-NEXT: s_nop 0
 ; GFX942-NEXT: v_cndmask_b32_e32 v5, v8, v9, vcc
-; GFX942-NEXT: v_cndmask_b32_e64 v4, v6, v7, s[0:1]
-; GFX942-NEXT: v_perm_b32 v4, v5, v4, s5
-; GFX942-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX942-NEXT: v_cndmask_b32_e64 v3, v6, v7, s[0:1]
+; GFX942-NEXT: v_perm_b32 v3, v5, v3, s5
+; GFX942-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
 ; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
 ; GFX942-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
-; GFX942-NEXT: v_mov_b32_e32 v3, v4
 ; GFX942-NEXT: s_andn2_b64 exec, exec, s[2:3]
 ; GFX942-NEXT: s_cbranch_execnz .LBB26_1
 ; GFX942-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -8492,30 +8520,32 @@ define void @local_atomic_fsub_noret_v2bf16(ptr addrspace(3) %ptr, <2 x bfloat>
 ; GFX11-TRUE16-NEXT: .LBB26_1: ; %atomicrmw.start
 ; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v3
 ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_dual_sub_f32 v5, v5, v2 :: v_dual_lshlrev_b32 v4, 16, v3
-; GFX11-TRUE16-NEXT: v_sub_f32_e32 v4, v4, v1
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX11-TRUE16-NEXT: v_sub_f32_e32 v5, v5, v2
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v4
 ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
 ; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v4, 16, 1
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v4
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: v_sub_f32_e32 v3, v3, v1
 ; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
 ; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX11-TRUE16-NEXT: v_add3_u32 v6, v6, v4, 0x7fff
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_4)
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v4, v6, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add3_u32 v6, v6, v3, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v6, v8, vcc_lo
 ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
 ; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
 ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v4.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v3.h
 ; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v5, v3
+; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v5, v4
 ; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX11-TRUE16-NEXT: buffer_gl0_inv
-; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
 ; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
 ; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
 ; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -8536,30 +8566,32 @@ define void @local_atomic_fsub_noret_v2bf16(ptr addrspace(3) %ptr, <2 x bfloat>
 ; GFX11-FAKE16-NEXT: .LBB26_1: ; %atomicrmw.start
 ; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v3
 ; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_dual_sub_f32 v5, v5, v1 :: v_dual_lshlrev_b32 v4, 16, v3
-; GFX11-FAKE16-NEXT: v_sub_f32_e32 v4, v4, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX11-FAKE16-NEXT: v_sub_f32_e32 v5, v5, v1
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v4
 ; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
 ; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX11-FAKE16-NEXT: v_bfe_u32 v6, v4, 16, 1
-; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v4
+; GFX11-FAKE16-NEXT: v_sub_f32_e32 v3, v3, v2
 ; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
 ; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
 ; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX11-FAKE16-NEXT: v_add3_u32 v6, v6, v4, 0x7fff
-; GFX11-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v4, v4
-; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v6, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v3, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
 ; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
-; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v4, v6, v8, s0
-; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_perm_b32 v4, v5, v4, 0x7060302
+; GFX11-FAKE16-NEXT: v_add3_u32 v6, v6, v3, 0x7fff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v3, v6, v8, s0
+; GFX11-FAKE16-NEXT: v_perm_b32 v3, v5, v3, 0x7060302
 ; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3
+; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4
 ; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX11-FAKE16-NEXT: buffer_gl0_inv
-; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
 ; GFX11-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
 ; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
 ; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
@@ -8579,27 +8611,27 @@ define void @local_atomic_fsub_noret_v2bf16(ptr addrspace(3) %ptr, <2 x bfloat>
 ; GFX10-NEXT: .LBB26_1: ; %atomicrmw.start
 ; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX10-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-NEXT: v_lshlrev_b32_e32 v4, 16, v3
-; GFX10-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
-; GFX10-NEXT: v_sub_f32_e32 v4, v4, v2
+; GFX10-NEXT: v_mov_b32_e32 v4, v3
+; GFX10-NEXT: v_lshlrev_b32_e32 v3, 16, v4
+; GFX10-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX10-NEXT: v_sub_f32_e32 v3, v3, v2
 ; GFX10-NEXT: v_sub_f32_e32 v5, v5, v1
-; GFX10-NEXT: v_bfe_u32 v6, v4, 16, 1
+; GFX10-NEXT: v_bfe_u32 v6, v3, 16, 1
 ; GFX10-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX10-NEXT: v_or_b32_e32 v8, 0x400000, v4
+; GFX10-NEXT: v_or_b32_e32 v8, 0x400000, v3
 ; GFX10-NEXT: v_or_b32_e32 v9, 0x400000, v5
 ; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX10-NEXT: v_add3_u32 v6, v6, v4, 0x7fff
+; GFX10-NEXT: v_add3_u32 v6, v6, v3, 0x7fff
 ; GFX10-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX10-NEXT: v_cmp_u_f32_e64 s4, v4, v4
+; GFX10-NEXT: v_cmp_u_f32_e64 s4, v3, v3
 ; GFX10-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
-; GFX10-NEXT: v_cndmask_b32_e64 v4, v6, v8, s4
-; GFX10-NEXT: v_perm_b32 v4, v5, v4, 0x7060302
+; GFX10-NEXT: v_cndmask_b32_e64 v3, v6, v8, s4
+; GFX10-NEXT: v_perm_b32 v3, v5, v3, 0x7060302
 ; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX10-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
 ; GFX10-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX10-NEXT: v_mov_b32_e32 v3, v4
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
 ; GFX10-NEXT: s_or_b32 s5, vcc_lo, s5
 ; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s5
 ; GFX10-NEXT: s_cbranch_execnz .LBB26_1
@@ -8619,26 +8651,26 @@ define void @local_atomic_fsub_noret_v2bf16(ptr addrspace(3) %ptr, <2 x bfloat>
 ; GFX90A-NEXT: .LBB26_1: ; %atomicrmw.start
 ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_lshlrev_b32_e32 v4, 16, v3
-; GFX90A-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
-; GFX90A-NEXT: v_sub_f32_e32 v4, v4, v2
+; GFX90A-NEXT: v_mov_b32_e32 v4, v3
+; GFX90A-NEXT: v_lshlrev_b32_e32 v3, 16, v4
+; GFX90A-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX90A-NEXT: v_sub_f32_e32 v3, v3, v2
 ; GFX90A-NEXT: v_sub_f32_e32 v5, v5, v1
-; GFX90A-NEXT: v_bfe_u32 v6, v4, 16, 1
+; GFX90A-NEXT: v_bfe_u32 v6, v3, 16, 1
 ; GFX90A-NEXT: v_bfe_u32 v8, v5, 16, 1
-; GFX90A-NEXT: v_or_b32_e32 v7, 0x400000, v4
+; GFX90A-NEXT: v_or_b32_e32 v7, 0x400000, v3
 ; GFX90A-NEXT: v_or_b32_e32 v9, 0x400000, v5
-; GFX90A-NEXT: v_add3_u32 v6, v6, v4, s8
+; GFX90A-NEXT: v_add3_u32 v6, v6, v3, s8
 ; GFX90A-NEXT: v_add3_u32 v8, v8, v5, s8
 ; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v5, v5
-; GFX90A-NEXT: v_cmp_u_f32_e64 s[4:5], v4, v4
-; GFX90A-NEXT: v_cndmask_b32_e64 v4, v6, v7, s[4:5]
+; GFX90A-NEXT: v_cmp_u_f32_e64 s[4:5], v3, v3
+; GFX90A-NEXT: v_cndmask_b32_e64 v3, v6, v7, s[4:5]
 ; GFX90A-NEXT: v_cndmask_b32_e32 v5, v8, v9, vcc
-; GFX90A-NEXT: v_perm_b32 v4, v5, v4, s9
-; GFX90A-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX90A-NEXT: v_perm_b32 v3, v5, v3, s9
+; GFX90A-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
 ; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
 ; GFX90A-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GFX90A-NEXT: v_mov_b32_e32 v3, v4
 ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[6:7]
 ; GFX90A-NEXT: s_cbranch_execnz .LBB26_1
 ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -8657,26 +8689,26 @@ define void @local_atomic_fsub_noret_v2bf16(ptr addrspace(3) %ptr, <2 x bfloat>
 ; GFX908-NEXT: .LBB26_1: ; %atomicrmw.start
 ; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_lshlrev_b32_e32 v4, 16, v3
-; GFX908-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
-; GFX908-NEXT: v_sub_f32_e32 v4, v4, v2
+; GFX908-NEXT: v_mov_b32_e32 v4, v3
+; GFX908-NEXT: v_lshlrev_b32_e32 v3, 16, v4
+; GFX908-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX908-NEXT: v_sub_f32_e32 v3, v3, v2
 ; GFX908-NEXT: v_sub_f32_e32 v5, v5, v1
-; GFX908-NEXT: v_bfe_u32 v6, v4, 16, 1
+; GFX908-NEXT: v_bfe_u32 v6, v3, 16, 1
 ; GFX908-NEXT: v_bfe_u32 v8, v5, 16, 1
-; GFX908-NEXT: v_or_b32_e32 v7, 0x400000, v4
+; GFX908-NEXT: v_or_b32_e32 v7, 0x400000, v3
 ; GFX908-NEXT: v_or_b32_e32 v9, 0x400000, v5
-; GFX908-NEXT: v_add3_u32 v6, v6, v4, s8
+; GFX908-NEXT: v_add3_u32 v6, v6, v3, s8
 ; GFX908-NEXT: v_add3_u32 v8, v8, v5, s8
 ; GFX908-NEXT: v_cmp_u_f32_e32 vcc, v5, v5
-; GFX908-NEXT: v_cmp_u_f32_e64 s[4:5], v4, v4
-; GFX908-NEXT: v_cndmask_b32_e64 v4, v6, v7, s[4:5]
+; GFX908-NEXT: v_cmp_u_f32_e64 s[4:5], v3, v3
+; GFX908-NEXT: v_cndmask_b32_e64 v3, v6, v7, s[4:5]
 ; GFX908-NEXT: v_cndmask_b32_e32 v5, v8, v9, vcc
-; GFX908-NEXT: v_perm_b32 v4, v5, v4, s9
-; GFX908-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX908-NEXT: v_perm_b32 v3, v5, v3, s9
+; GFX908-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
 ; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
 ; GFX908-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GFX908-NEXT: v_mov_b32_e32 v3, v4
 ; GFX908-NEXT: s_andn2_b64 exec, exec, s[6:7]
 ; GFX908-NEXT: s_cbranch_execnz .LBB26_1
 ; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -8694,29 +8726,29 @@ define void @local_atomic_fsub_noret_v2bf16(ptr addrspace(3) %ptr, <2 x bfloat>
 ; GFX8-NEXT: .LBB26_1: ; %atomicrmw.start
 ; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_lshlrev_b32_e32 v4, 16, v3
-; GFX8-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
-; GFX8-NEXT: v_sub_f32_e32 v4, v4, v2
+; GFX8-NEXT: v_mov_b32_e32 v4, v3
+; GFX8-NEXT: v_lshlrev_b32_e32 v3, 16, v4
+; GFX8-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX8-NEXT: v_sub_f32_e32 v3, v3, v2
 ; GFX8-NEXT: v_sub_f32_e32 v5, v5, v1
-; GFX8-NEXT: v_bfe_u32 v6, v4, 16, 1
+; GFX8-NEXT: v_bfe_u32 v6, v3, 16, 1
 ; GFX8-NEXT: v_bfe_u32 v8, v5, 16, 1
-; GFX8-NEXT: v_add_u32_e32 v6, vcc, v6, v4
+; GFX8-NEXT: v_add_u32_e32 v6, vcc, v6, v3
 ; GFX8-NEXT: v_add_u32_e32 v8, vcc, v8, v5
 ; GFX8-NEXT: v_add_u32_e32 v6, vcc, 0x7fff, v6
 ; GFX8-NEXT: v_add_u32_e32 v8, vcc, 0x7fff, v8
 ; GFX8-NEXT: v_or_b32_e32 v9, 0x400000, v5
 ; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v5, v5
-; GFX8-NEXT: v_or_b32_e32 v7, 0x400000, v4
-; GFX8-NEXT: v_cmp_u_f32_e64 s[4:5], v4, v4
+; GFX8-NEXT: v_or_b32_e32 v7, 0x400000, v3
+; GFX8-NEXT: v_cmp_u_f32_e64 s[4:5], v3, v3
 ; GFX8-NEXT: v_cndmask_b32_e32 v5, v8, v9, vcc
-; GFX8-NEXT: v_cndmask_b32_e64 v4, v6, v7, s[4:5]
+; GFX8-NEXT: v_cndmask_b32_e64 v3, v6, v7, s[4:5]
 ; GFX8-NEXT: v_lshrrev_b32_e32 v5, 16, v5
-; GFX8-NEXT: v_alignbit_b32 v4, v5, v4, 16
-; GFX8-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX8-NEXT: v_alignbit_b32 v3, v5, v3, 16
+; GFX8-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
 ; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
 ; GFX8-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GFX8-NEXT: v_mov_b32_e32 v3, v4
 ; GFX8-NEXT: s_andn2_b64 exec, exec, s[6:7]
 ; GFX8-NEXT: s_cbranch_execnz .LBB26_1
 ; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -8815,31 +8847,34 @@ define void @local_atomic_fsub_noret_v2bf16__ofset(ptr addrspace(3) %ptr, <2 x b
 ; GFX12-TRUE16-NEXT: .LBB27_1: ; %atomicrmw.start
 ; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
-; GFX12-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v4, v3
 ; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-TRUE16-NEXT: v_dual_sub_f32 v5, v5, v2 :: v_dual_lshlrev_b32 v4, 16, v3
-; GFX12-TRUE16-NEXT: v_sub_f32_e32 v4, v4, v1
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX12-TRUE16-NEXT: v_sub_f32_e32 v5, v5, v2
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v4
 ; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
 ; GFX12-TRUE16-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX12-TRUE16-NEXT: v_bfe_u32 v6, v4, 16, 1
-; GFX12-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v4
-; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX12-TRUE16-NEXT: v_sub_f32_e32 v3, v3, v1
 ; GFX12-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
 ; GFX12-TRUE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX12-TRUE16-NEXT: v_add3_u32 v6, v6, v4, 0x7fff
+; GFX12-TRUE16-NEXT: v_bfe_u32 v6, v3, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v3
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_add3_u32 v6, v6, v3, 0x7fff
 ; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
-; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_3)
-; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v4, v6, v8, vcc_lo
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v3, v6, v8, vcc_lo
 ; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
 ; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
 ; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
-; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.l, v4.h
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.l, v3.h
 ; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
-; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v5, v3 offset:65532
+; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v5, v4 offset:65532
 ; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
 ; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SE
-; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX12-TRUE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
 ; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
 ; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
 ; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
@@ -8864,32 +8899,33 @@ define void @local_atomic_fsub_noret_v2bf16__ofset(ptr addrspace(3) %ptr, <2 x b
 ; GFX12-FAKE16-NEXT: .LBB27_1: ; %atomicrmw.start
 ; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
-; GFX12-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v4, v3
 ; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_dual_sub_f32 v5, v5, v1 :: v_dual_lshlrev_b32 v4, 16, v3
-; GFX12-FAKE16-NEXT: v_sub_f32_e32 v4, v4, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX12-FAKE16-NEXT: v_sub_f32_e32 v5, v5, v1
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v4
 ; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
 ; GFX12-FAKE16-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX12-FAKE16-NEXT: v_bfe_u32 v6, v4, 16, 1
-; GFX12-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v4
+; GFX12-FAKE16-NEXT: v_sub_f32_e32 v3, v3, v2
 ; GFX12-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
 ; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
 ; GFX12-FAKE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX12-FAKE16-NEXT: v_add3_u32 v6, v6, v4, 0x7fff
-; GFX12-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v4, v4
+; GFX12-FAKE16-NEXT: v_bfe_u32 v6, v3, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v3
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v3, v3
 ; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2)
 ; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
+; GFX12-FAKE16-NEXT: v_add3_u32 v6, v6, v3, 0x7fff
 ; GFX12-FAKE16-NEXT: s_wait_alu 0xf1ff
-; GFX12-FAKE16-NEXT: v_cndmask_b32_e64 v4, v6, v8, s0
-; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_perm_b32 v4, v5, v4, 0x7060302
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e64 v3, v6, v8, s0
+; GFX12-FAKE16-NEXT: v_perm_b32 v3, v5, v3, 0x7060302
 ; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
-; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3 offset:65532
+; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4 offset:65532
 ; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
 ; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SE
-; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX12-FAKE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
 ; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
 ; GFX12-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
 ; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
@@ -8912,27 +8948,27 @@ define void @local_atomic_fsub_noret_v2bf16__ofset(ptr addrspace(3) %ptr, <2 x b
 ; GFX942-NEXT: .LBB27_1: ; %atomicrmw.start
 ; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_lshlrev_b32_e32 v4, 16, v3
-; GFX942-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
-; GFX942-NEXT: v_sub_f32_e32 v4, v4, v2
+; GFX942-NEXT: v_mov_b32_e32 v4, v3
+; GFX942-NEXT: v_lshlrev_b32_e32 v3, 16, v4
+; GFX942-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX942-NEXT: v_sub_f32_e32 v3, v3, v2
 ; GFX942-NEXT: v_sub_f32_e32 v5, v5, v1
-; GFX942-NEXT: v_bfe_u32 v6, v4, 16, 1
+; GFX942-NEXT: v_bfe_u32 v6, v3, 16, 1
 ; GFX942-NEXT: v_bfe_u32 v8, v5, 16, 1
-; GFX942-NEXT: v_or_b32_e32 v7, 0x400000, v4
+; GFX942-NEXT: v_or_b32_e32 v7, 0x400000, v3
 ; GFX942-NEXT: v_or_b32_e32 v9, 0x400000, v5
-; GFX942-NEXT: v_add3_u32 v6, v6, v4, s4
+; GFX942-NEXT: v_add3_u32 v6, v6, v3, s4
 ; GFX942-NEXT: v_add3_u32 v8, v8, v5, s4
 ; GFX942-NEXT: v_cmp_u_f32_e32 vcc, v5, v5
-; GFX942-NEXT: v_cmp_u_f32_e64 s[0:1], v4, v4
+; GFX942-NEXT: v_cmp_u_f32_e64 s[0:1], v3, v3
 ; GFX942-NEXT: s_nop 0
 ; GFX942-NEXT: v_cndmask_b32_e32 v5, v8, v9, vcc
-; GFX942-NEXT: v_cndmask_b32_e64 v4, v6, v7, s[0:1]
-; GFX942-NEXT: v_perm_b32 v4, v5, v4, s5
-; GFX942-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4 offset:65532
+; GFX942-NEXT: v_cndmask_b32_e64 v3, v6, v7, s[0:1]
+; GFX942-NEXT: v_perm_b32 v3, v5, v3, s5
+; GFX942-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3 offset:65532
 ; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
 ; GFX942-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
-; GFX942-NEXT: v_mov_b32_e32 v3, v4
 ; GFX942-NEXT: s_andn2_b64 exec, exec, s[2:3]
 ; GFX942-NEXT: s_cbranch_execnz .LBB27_1
 ; GFX942-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -8950,30 +8986,32 @@ define void @local_atomic_fsub_noret_v2bf16__ofset(ptr addrspace(3) %ptr, <2 x b
 ; GFX11-TRUE16-NEXT: .LBB27_1: ; %atomicrmw.start
 ; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v3
 ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_dual_sub_f32 v5, v5, v2 :: v_dual_lshlrev_b32 v4, 16, v3
-; GFX11-TRUE16-NEXT: v_sub_f32_e32 v4, v4, v1
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX11-TRUE16-NEXT: v_sub_f32_e32 v5, v5, v2
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v4
 ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
 ; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v4, 16, 1
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v4
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: v_sub_f32_e32 v3, v3, v1
 ; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
 ; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX11-TRUE16-NEXT: v_add3_u32 v6, v6, v4, 0x7fff
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_4)
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v4, v6, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add3_u32 v6, v6, v3, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v6, v8, vcc_lo
 ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
 ; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
 ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v4.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v3.h
 ; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v5, v3 offset:65532
+; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v5, v4 offset:65532
 ; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX11-TRUE16-NEXT: buffer_gl0_inv
-; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
 ; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
 ; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
 ; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -8994,30 +9032,32 @@ define void @local_atomic_fsub_noret_v2bf16__ofset(ptr addrspace(3) %ptr, <2 x b
 ; GFX11-FAKE16-NEXT: .LBB27_1: ; %atomicrmw.start
 ; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v3
 ; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_dual_sub_f32 v5, v5, v1 :: v_dual_lshlrev_b32 v4, 16, v3
-; GFX11-FAKE16-NEXT: v_sub_f32_e32 v4, v4, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX11-FAKE16-NEXT: v_sub_f32_e32 v5, v5, v1
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v4
 ; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
 ; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX11-FAKE16-NEXT: v_bfe_u32 v6, v4, 16, 1
-; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v4
+; GFX11-FAKE16-NEXT: v_sub_f32_e32 v3, v3, v2
 ; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
 ; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
 ; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX11-FAKE16-NEXT: v_add3_u32 v6, v6, v4, 0x7fff
-; GFX11-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v4, v4
-; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v6, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v3, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
 ; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
-; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v4, v6, v8, s0
-; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_perm_b32 v4, v5, v4, 0x7060302
+; GFX11-FAKE16-NEXT: v_add3_u32 v6, v6, v3, 0x7fff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v3, v6, v8, s0
+; GFX11-FAKE16-NEXT: v_perm_b32 v3, v5, v3, 0x7060302
 ; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3 offset:65532
+; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4 offset:65532
 ; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX11-FAKE16-NEXT: buffer_gl0_inv
-; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
 ; GFX11-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
 ; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
 ; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
@@ -9037,27 +9077,27 @@ define void @local_atomic_fsub_noret_v2bf16__ofset(ptr addrspace(3) %ptr, <2 x b
 ; GFX10-NEXT: .LBB27_1: ; %atomicrmw.start
 ; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX10-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-NEXT: v_lshlrev_b32_e32 v4, 16, v3
-; GFX10-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
-; GFX10-NEXT: v_sub_f32_e32 v4, v4, v2
+; GFX10-NEXT: v_mov_b32_e32 v4, v3
+; GFX10-NEXT: v_lshlrev_b32_e32 v3, 16, v4
+; GFX10-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX10-NEXT: v_sub_f32_e32 v3, v3, v2
 ; GFX10-NEXT: v_sub_f32_e32 v5, v5, v1
-; GFX10-NEXT: v_bfe_u32 v6, v4, 16, 1
+; GFX10-NEXT: v_bfe_u32 v6, v3, 16, 1
 ; GFX10-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX10-NEXT: v_or_b32_e32 v8, 0x400000, v4
+; GFX10-NEXT: v_or_b32_e32 v8, 0x400000, v3
 ; GFX10-NEXT: v_or_b32_e32 v9, 0x400000, v5
 ; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX10-NEXT: v_add3_u32 v6, v6, v4, 0x7fff
+; GFX10-NEXT: v_add3_u32 v6, v6, v3, 0x7fff
 ; GFX10-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX10-NEXT: v_cmp_u_f32_e64 s4, v4, v4
+; GFX10-NEXT: v_cmp_u_f32_e64 s4, v3, v3
 ; GFX10-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
-; GFX10-NEXT: v_cndmask_b32_e64 v4, v6, v8, s4
-; GFX10-NEXT: v_perm_b32 v4, v5, v4, 0x7060302
+; GFX10-NEXT: v_cndmask_b32_e64 v3, v6, v8, s4
+; GFX10-NEXT: v_perm_b32 v3, v5, v3, 0x7060302
 ; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4 offset:65532
+; GFX10-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3 offset:65532
 ; GFX10-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX10-NEXT: v_mov_b32_e32 v3, v4
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
 ; GFX10-NEXT: s_or_b32 s5, vcc_lo, s5
 ; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s5
 ; GFX10-NEXT: s_cbranch_execnz .LBB27_1
@@ -9077,26 +9117,26 @@ define void @local_atomic_fsub_noret_v2bf16__ofset(ptr addrspace(3) %ptr, <2 x b
 ; GFX90A-NEXT: .LBB27_1: ; %atomicrmw.start
 ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_lshlrev_b32_e32 v4, 16, v3
-; GFX90A-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
-; GFX90A-NEXT: v_sub_f32_e32 v4, v4, v2
+; GFX90A-NEXT: v_mov_b32_e32 v4, v3
+; GFX90A-NEXT: v_lshlrev_b32_e32 v3, 16, v4
+; GFX90A-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX90A-NEXT: v_sub_f32_e32 v3, v3, v2
 ; GFX90A-NEXT: v_sub_f32_e32 v5, v5, v1
-; GFX90A-NEXT: v_bfe_u32 v6, v4, 16, 1
+; GFX90A-NEXT: v_bfe_u32 v6, v3, 16, 1
 ; GFX90A-NEXT: v_bfe_u32 v8, v5, 16, 1
-; GFX90A-NEXT: v_or_b32_e32 v7, 0x400000, v4
+; GFX90A-NEXT: v_or_b32_e32 v7, 0x400000, v3
 ; GFX90A-NEXT: v_or_b32_e32 v9, 0x400000, v5
-; GFX90A-NEXT: v_add3_u32 v6, v6, v4, s8
+; GFX90A-NEXT: v_add3_u32 v6, v6, v3, s8
 ; GFX90A-NEXT: v_add3_u32 v8, v8, v5, s8
 ; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v5, v5
-; GFX90A-NEXT: v_cmp_u_f32_e64 s[4:5], v4, v4
-; GFX90A-NEXT: v_cndmask_b32_e64 v4, v6, v7, s[4:5]
+; GFX90A-NEXT: v_cmp_u_f32_e64 s[4:5], v3, v3
+; GFX90A-NEXT: v_cndmask_b32_e64 v3, v6, v7, s[4:5]
 ; GFX90A-NEXT: v_cndmask_b32_e32 v5, v8, v9, vcc
-; GFX90A-NEXT: v_perm_b32 v4, v5, v4, s9
-; GFX90A-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4 offset:65532
+; GFX90A-NEXT: v_perm_b32 v3, v5, v3, s9
+; GFX90A-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3 offset:65532
 ; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
 ; GFX90A-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GFX90A-NEXT: v_mov_b32_e32 v3, v4
 ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[6:7]
 ; GFX90A-NEXT: s_cbranch_execnz .LBB27_1
 ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -9115,26 +9155,26 @@ define void @local_atomic_fsub_noret_v2bf16__ofset(ptr addrspace(3) %ptr, <2 x b
 ; GFX908-NEXT: .LBB27_1: ; %atomicrmw.start
 ; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_lshlrev_b32_e32 v4, 16, v3
-; GFX908-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
-; GFX908-NEXT: v_sub_f32_e32 v4, v4, v2
+; GFX908-NEXT: v_mov_b32_e32 v4, v3
+; GFX908-NEXT: v_lshlrev_b32_e32 v3, 16, v4
+; GFX908-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX908-NEXT: v_sub_f32_e32 v3, v3, v2
 ; GFX908-NEXT: v_sub_f32_e32 v5, v5, v1
-; GFX908-NEXT: v_bfe_u32 v6, v4, 16, 1
+; GFX908-NEXT: v_bfe_u32 v6, v3, 16, 1
 ; GFX908-NEXT: v_bfe_u32 v8, v5, 16, 1
-; GFX908-NEXT: v_or_b32_e32 v7, 0x400000, v4
+; GFX908-NEXT: v_or_b32_e32 v7, 0x400000, v3
 ; GFX908-NEXT: v_or_b32_e32 v9, 0x400000, v5
-; GFX908-NEXT: v_add3_u32 v6, v6, v4, s8
+; GFX908-NEXT: v_add3_u32 v6, v6, v3, s8
 ; GFX908-NEXT: v_add3_u32 v8, v8, v5, s8
 ; GFX908-NEXT: v_cmp_u_f32_e32 vcc, v5, v5
-; GFX908-NEXT: v_cmp_u_f32_e64 s[4:5], v4, v4
-; GFX908-NEXT: v_cndmask_b32_e64 v4, v6, v7, s[4:5]
+; GFX908-NEXT: v_cmp_u_f32_e64 s[4:5], v3, v3
+; GFX908-NEXT: v_cndmask_b32_e64 v3, v6, v7, s[4:5]
 ; GFX908-NEXT: v_cndmask_b32_e32 v5, v8, v9, vcc
-; GFX908-NEXT: v_perm_b32 v4, v5, v4, s9
-; GFX908-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4 offset:65532
+; GFX908-NEXT: v_perm_b32 v3, v5, v3, s9
+; GFX908-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3 offset:65532
 ; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
 ; GFX908-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GFX908-NEXT: v_mov_b32_e32 v3, v4
 ; GFX908-NEXT: s_andn2_b64 exec, exec, s[6:7]
 ; GFX908-NEXT: s_cbranch_execnz .LBB27_1
 ; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -9152,29 +9192,29 @@ define void @local_atomic_fsub_noret_v2bf16__ofset(ptr addrspace(3) %ptr, <2 x b
 ; GFX8-NEXT: .LBB27_1: ; %atomicrmw.start
 ; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_lshlrev_b32_e32 v4, 16, v3
-; GFX8-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
-; GFX8-NEXT: v_sub_f32_e32 v4, v4, v2
+; GFX8-NEXT: v_mov_b32_e32 v4, v3
+; GFX8-NEXT: v_lshlrev_b32_e32 v3, 16, v4
+; GFX8-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX8-NEXT: v_sub_f32_e32 v3, v3, v2
 ; GFX8-NEXT: v_sub_f32_e32 v5, v5, v1
-; GFX8-NEXT: v_bfe_u32 v6, v4, 16, 1
+; GFX8-NEXT: v_bfe_u32 v6, v3, 16, 1
 ; GFX8-NEXT: v_bfe_u32 v8, v5, 16, 1
-; GFX8-NEXT: v_add_u32_e32 v6, vcc, v6, v4
+; GFX8-NEXT: v_add_u32_e32 v6, vcc, v6, v3
 ; GFX8-NEXT: v_add_u32_e32 v8, vcc, v8, v5
 ; GFX8-NEXT: v_add_u32_e32 v6, vcc, 0x7fff, v6
 ; GFX8-NEXT: v_add_u32_e32 v8, vcc, 0x7fff, v8
 ; GFX8-NEXT: v_or_b32_e32 v9, 0x400000, v5
 ; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v5, v5
-; GFX8-NEXT: v_or_b32_e32 v7, 0x400000, v4
-; GFX8-NEXT: v_cmp_u_f32_e64 s[4:5], v4, v4
+; GFX8-NEXT: v_or_b32_e32 v7, 0x400000, v3
+; GFX8-NEXT: v_cmp_u_f32_e64 s[4:5], v3, v3
 ; GFX8-NEXT: v_cndmask_b32_e32 v5, v8, v9, vcc
-; GFX8-NEXT: v_cndmask_b32_e64 v4, v6, v7, s[4:5]
+; GFX8-NEXT: v_cndmask_b32_e64 v3, v6, v7, s[4:5]
 ; GFX8-NEXT: v_lshrrev_b32_e32 v5, 16, v5
-; GFX8-NEXT: v_alignbit_b32 v4, v5, v4, 16
-; GFX8-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4 offset:65532
+; GFX8-NEXT: v_alignbit_b32 v3, v5, v3, 16
+; GFX8-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3 offset:65532
 ; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
 ; GFX8-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GFX8-NEXT: v_mov_b32_e32 v3, v4
 ; GFX8-NEXT: s_andn2_b64 exec, exec, s[6:7]
 ; GFX8-NEXT: s_cbranch_execnz .LBB27_1
 ; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -9489,13 +9529,14 @@ define void @local_atomic_fsub_noret_f32__amdgpu_ignore_denormal_mode(ptr addrsp
 ; GFX12-NEXT: .LBB29_1: ; %atomicrmw.start
 ; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX12-NEXT: s_wait_dscnt 0x0
-; GFX12-NEXT: v_add_f32_e32 v2, -4.0, v1
+; GFX12-NEXT: v_mov_b32_e32 v2, v1
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_add_f32_e32 v1, -4.0, v2
 ; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v1
+; GFX12-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2
 ; GFX12-NEXT: s_wait_dscnt 0x0
 ; GFX12-NEXT: global_inv scope:SCOPE_SE
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
-; GFX12-NEXT: v_mov_b32_e32 v1, v2
+; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
 ; GFX12-NEXT: s_wait_alu 0xfffe
 ; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
 ; GFX12-NEXT: s_wait_alu 0xfffe
@@ -9514,12 +9555,12 @@ define void @local_atomic_fsub_noret_f32__amdgpu_ignore_denormal_mode(ptr addrsp
 ; GFX942-NEXT: .LBB29_1: ; %atomicrmw.start
 ; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_add_f32_e32 v2, -4.0, v1
-; GFX942-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2
+; GFX942-NEXT: v_mov_b32_e32 v2, v1
+; GFX942-NEXT: v_add_f32_e32 v1, -4.0, v2
+; GFX942-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1
 ; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
 ; GFX942-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
-; GFX942-NEXT: v_mov_b32_e32 v1, v2
 ; GFX942-NEXT: s_andn2_b64 exec, exec, s[0:1]
 ; GFX942-NEXT: s_cbranch_execnz .LBB29_1
 ; GFX942-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -9534,13 +9575,14 @@ define void @local_atomic_fsub_noret_f32__amdgpu_ignore_denormal_mode(ptr addrsp
 ; GFX11-NEXT: .LBB29_1: ; %atomicrmw.start
 ; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: v_add_f32_e32 v2, -4.0, v1
+; GFX11-NEXT: v_mov_b32_e32 v2, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_add_f32_e32 v1, -4.0, v2
 ; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v1
+; GFX11-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2
 ; GFX11-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
-; GFX11-NEXT: v_mov_b32_e32 v1, v2
+; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
 ; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
 ; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
 ; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -9557,13 +9599,13 @@ define void @local_atomic_fsub_noret_f32__amdgpu_ignore_denormal_mode(ptr addrsp
 ; GFX10-NEXT: .LBB29_1: ; %atomicrmw.start
 ; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX10-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-NEXT: v_add_f32_e32 v2, -4.0, v1
+; GFX10-NEXT: v_mov_b32_e32 v2, v1
+; GFX10-NEXT: v_add_f32_e32 v1, -4.0, v2
 ; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2
+; GFX10-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1
 ; GFX10-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
-; GFX10-NEXT: v_mov_b32_e32 v1, v2
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
 ; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4
 ; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4
 ; GFX10-NEXT: s_cbranch_execnz .LBB29_1
@@ -9579,12 +9621,12 @@ define void @local_atomic_fsub_noret_f32__amdgpu_ignore_denormal_mode(ptr addrsp
 ; GFX90A-NEXT: .LBB29_1: ; %atomicrmw.start
 ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_add_f32_e32 v2, -4.0, v1
-; GFX90A-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2
+; GFX90A-NEXT: v_mov_b32_e32 v2, v1
+; GFX90A-NEXT: v_add_f32_e32 v1, -4.0, v2
+; GFX90A-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1
 ; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
 ; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX90A-NEXT: v_mov_b32_e32 v1, v2
 ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5]
 ; GFX90A-NEXT: s_cbranch_execnz .LBB29_1
 ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -9599,12 +9641,12 @@ define void @local_atomic_fsub_noret_f32__amdgpu_ignore_denormal_mode(ptr addrsp
 ; GFX908-NEXT: .LBB29_1: ; %atomicrmw.start
 ; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_add_f32_e32 v2, -4.0, v1
-; GFX908-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2
+; GFX908-NEXT: v_mov_b32_e32 v2, v1
+; GFX908-NEXT: v_add_f32_e32 v1, -4.0, v2
+; GFX908-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1
 ; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
 ; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX908-NEXT: v_mov_b32_e32 v1, v2
 ; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5]
 ; GFX908-NEXT: s_cbranch_execnz .LBB29_1
 ; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -9620,12 +9662,12 @@ define void @local_atomic_fsub_noret_f32__amdgpu_ignore_denormal_mode(ptr addrsp
 ; GFX8-NEXT: .LBB29_1: ; %atomicrmw.start
 ; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_add_f32_e32 v2, -4.0, v1
-; GFX8-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2
+; GFX8-NEXT: v_mov_b32_e32 v2, v1
+; GFX8-NEXT: v_add_f32_e32 v1, -4.0, v2
+; GFX8-NEXT: ds_cmpst_rtn_b32 v1,
v0, v2, v1 ; GFX8-NEXT: s_waitcnt lgkmcnt(0) -; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1 +; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2 ; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5] -; GFX8-NEXT: v_mov_b32_e32 v1, v2 ; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX8-NEXT: s_cbranch_execnz .LBB29_1 ; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end @@ -9641,12 +9683,12 @@ define void @local_atomic_fsub_noret_f32__amdgpu_ignore_denormal_mode(ptr addrsp ; GFX7-NEXT: .LBB29_1: ; %atomicrmw.start ; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX7-NEXT: s_waitcnt lgkmcnt(0) -; GFX7-NEXT: v_add_f32_e32 v2, -4.0, v1 -; GFX7-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 +; GFX7-NEXT: v_mov_b32_e32 v2, v1 +; GFX7-NEXT: v_add_f32_e32 v1, -4.0, v2 +; GFX7-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 ; GFX7-NEXT: s_waitcnt lgkmcnt(0) -; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1 +; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2 ; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5] -; GFX7-NEXT: v_mov_b32_e32 v1, v2 ; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX7-NEXT: s_cbranch_execnz .LBB29_1 ; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end @@ -9662,12 +9704,12 @@ define void @local_atomic_fsub_noret_f32__amdgpu_ignore_denormal_mode(ptr addrsp ; GFX6-NEXT: .LBB29_1: ; %atomicrmw.start ; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX6-NEXT: s_waitcnt lgkmcnt(0) -; GFX6-NEXT: v_add_f32_e32 v2, -4.0, v1 -; GFX6-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 +; GFX6-NEXT: v_mov_b32_e32 v2, v1 +; GFX6-NEXT: v_add_f32_e32 v1, -4.0, v2 +; GFX6-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 ; GFX6-NEXT: s_waitcnt lgkmcnt(0) -; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1 +; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2 ; GFX6-NEXT: s_or_b64 s[4:5], vcc, s[4:5] -; GFX6-NEXT: v_mov_b32_e32 v1, v2 ; GFX6-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX6-NEXT: s_cbranch_execnz .LBB29_1 ; GFX6-NEXT: ; %bb.2: ; %atomicrmw.end diff --git a/llvm/test/CodeGen/AMDGPU/lshr.v2i16.ll b/llvm/test/CodeGen/AMDGPU/lshr.v2i16.ll index 68506cec96a72..9056d40ad8878 100644 --- a/llvm/test/CodeGen/AMDGPU/lshr.v2i16.ll +++ b/llvm/test/CodeGen/AMDGPU/lshr.v2i16.ll @@ -36,20 +36,19 @@ define amdgpu_kernel void @s_lshr_v2i16(ptr addrspace(1) %out, <2 x i16> %lhs, < ; CI-LABEL: s_lshr_v2i16: ; CI: ; %bb.0: ; CI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9 -; CI-NEXT: s_mov_b32 s7, 0xf000 -; CI-NEXT: s_mov_b32 s6, -1 ; CI-NEXT: s_waitcnt lgkmcnt(0) -; CI-NEXT: s_mov_b32 s4, s0 -; CI-NEXT: s_mov_b32 s5, s1 -; CI-NEXT: s_and_b32 s0, s2, 0xffff -; CI-NEXT: s_lshr_b32 s1, s2, 16 -; CI-NEXT: s_lshr_b32 s2, s3, 16 -; CI-NEXT: s_lshr_b32 s1, s1, s2 -; CI-NEXT: s_lshl_b32 s1, s1, 16 -; CI-NEXT: s_lshr_b32 s0, s0, s3 -; CI-NEXT: s_or_b32 s0, s0, s1 -; CI-NEXT: v_mov_b32_e32 v0, s0 -; CI-NEXT: buffer_store_dword v0, off, s[4:7], 0 +; CI-NEXT: s_mov_b64 s[4:5], s[2:3] +; CI-NEXT: s_and_b32 s6, s4, 0xffff +; CI-NEXT: s_lshr_b32 s4, s4, 16 +; CI-NEXT: s_lshr_b32 s7, s5, 16 +; CI-NEXT: s_lshr_b32 s4, s4, s7 +; CI-NEXT: s_lshl_b32 s4, s4, 16 +; CI-NEXT: s_lshr_b32 s5, s6, s5 +; CI-NEXT: s_or_b32 s4, s5, s4 +; CI-NEXT: s_mov_b32 s3, 0xf000 +; CI-NEXT: s_mov_b32 s2, -1 +; CI-NEXT: v_mov_b32_e32 v0, s4 +; CI-NEXT: buffer_store_dword v0, off, s[0:3], 0 ; CI-NEXT: s_endpgm ; ; GFX10-LABEL: s_lshr_v2i16: diff --git a/llvm/test/CodeGen/AMDGPU/machine-sink-temporal-divergence-swdev407790.ll b/llvm/test/CodeGen/AMDGPU/machine-sink-temporal-divergence-swdev407790.ll index 680942fcb4d4b..9ecd35e7ddd11 100644 --- a/llvm/test/CodeGen/AMDGPU/machine-sink-temporal-divergence-swdev407790.ll +++ 
b/llvm/test/CodeGen/AMDGPU/machine-sink-temporal-divergence-swdev407790.ll @@ -133,7 +133,7 @@ define protected amdgpu_kernel void @kernel_round1(ptr addrspace(1) nocapture no ; CHECK-NEXT: ; %bb.3: ; CHECK-NEXT: s_or_b32 exec_lo, exec_lo, s4 ; CHECK-NEXT: v_add_nc_u32_e32 v45, -1, v42 -; CHECK-NEXT: s_mov_b32 s53, 0 +; CHECK-NEXT: s_mov_b32 s55, 0 ; CHECK-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v45 ; CHECK-NEXT: s_and_b32 exec_lo, exec_lo, vcc_lo ; CHECK-NEXT: s_cbranch_execz .LBB0_25 @@ -141,7 +141,7 @@ define protected amdgpu_kernel void @kernel_round1(ptr addrspace(1) nocapture no ; CHECK-NEXT: v_lshlrev_b32_e32 v43, 10, v43 ; CHECK-NEXT: v_add_nc_u32_e32 v46, 0x3c05, v0 ; CHECK-NEXT: v_mov_b32_e32 v47, 0 -; CHECK-NEXT: s_mov_b32 s55, 0 +; CHECK-NEXT: s_mov_b32 s53, 0 ; CHECK-NEXT: .LBB0_5: ; =>This Loop Header: Depth=1 ; CHECK-NEXT: ; Child Loop BB0_8 Depth 2 ; CHECK-NEXT: ; Child Loop BB0_20 Depth 2 @@ -866,8 +866,8 @@ define protected amdgpu_kernel void @kernel_round1_short(ptr addrspace(1) nocapt ; CHECK-NEXT: s_swappc_b64 s[30:31], s[16:17] ; CHECK-NEXT: v_mov_b32_e32 v41, v0 ; CHECK-NEXT: v_lshlrev_b32_e32 v42, 10, v42 -; CHECK-NEXT: s_mov_b32 s52, 0 ; CHECK-NEXT: s_mov_b32 s4, 0 +; CHECK-NEXT: s_mov_b32 s52, 0 ; CHECK-NEXT: ds_write_b8 v46, v43 offset:15364 ; CHECK-NEXT: v_add_nc_u32_e32 v45, -1, v41 ; CHECK-NEXT: .LBB1_1: ; %.37 diff --git a/llvm/test/CodeGen/AMDGPU/mad_uint24.ll b/llvm/test/CodeGen/AMDGPU/mad_uint24.ll index 46b8df4b4537e..9cc0e6228a913 100644 --- a/llvm/test/CodeGen/AMDGPU/mad_uint24.ll +++ b/llvm/test/CodeGen/AMDGPU/mad_uint24.ll @@ -133,35 +133,33 @@ define amdgpu_kernel void @i16_mad24(ptr addrspace(1) %out, i16 %a, i16 %b, i16 ; GCN-LABEL: i16_mad24: ; GCN: ; %bb.0: ; %entry ; GCN-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9 -; GCN-NEXT: s_load_dword s4, s[4:5], 0xb -; GCN-NEXT: s_mov_b32 s7, 0xf000 +; GCN-NEXT: s_load_dword s6, s[4:5], 0xb ; GCN-NEXT: s_waitcnt lgkmcnt(0) -; GCN-NEXT: s_lshr_b32 s2, s2, 16 -; GCN-NEXT: s_mul_i32 s2, s4, s2 -; GCN-NEXT: s_add_i32 s2, s2, s3 -; GCN-NEXT: s_sext_i32_i16 s2, s2 -; GCN-NEXT: s_mov_b32 s6, -1 -; GCN-NEXT: s_mov_b32 s4, s0 -; GCN-NEXT: s_mov_b32 s5, s1 -; GCN-NEXT: v_mov_b32_e32 v0, s2 -; GCN-NEXT: buffer_store_dword v0, off, s[4:7], 0 +; GCN-NEXT: s_mov_b64 s[4:5], s[2:3] +; GCN-NEXT: s_mov_b32 s3, 0xf000 +; GCN-NEXT: s_lshr_b32 s2, s4, 16 +; GCN-NEXT: s_mul_i32 s2, s6, s2 +; GCN-NEXT: s_add_i32 s2, s2, s5 +; GCN-NEXT: s_sext_i32_i16 s4, s2 +; GCN-NEXT: s_mov_b32 s2, -1 +; GCN-NEXT: v_mov_b32_e32 v0, s4 +; GCN-NEXT: buffer_store_dword v0, off, s[0:3], 0 ; GCN-NEXT: s_endpgm ; ; GFX8-LABEL: i16_mad24: ; GFX8: ; %bb.0: ; %entry ; GFX8-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24 -; GFX8-NEXT: s_load_dword s8, s[4:5], 0x2c -; GFX8-NEXT: s_mov_b32 s7, 0xf000 -; GFX8-NEXT: s_mov_b32 s6, -1 +; GFX8-NEXT: s_load_dword s6, s[4:5], 0x2c ; GFX8-NEXT: s_waitcnt lgkmcnt(0) -; GFX8-NEXT: s_mov_b32 s4, s0 -; GFX8-NEXT: s_lshr_b32 s0, s2, 16 -; GFX8-NEXT: s_mul_i32 s0, s8, s0 -; GFX8-NEXT: s_add_i32 s0, s0, s3 -; GFX8-NEXT: s_sext_i32_i16 s0, s0 -; GFX8-NEXT: s_mov_b32 s5, s1 -; GFX8-NEXT: v_mov_b32_e32 v0, s0 -; GFX8-NEXT: buffer_store_dword v0, off, s[4:7], 0 +; GFX8-NEXT: s_mov_b64 s[4:5], s[2:3] +; GFX8-NEXT: s_lshr_b32 s4, s4, 16 +; GFX8-NEXT: s_mul_i32 s4, s6, s4 +; GFX8-NEXT: s_add_i32 s4, s4, s5 +; GFX8-NEXT: s_sext_i32_i16 s4, s4 +; GFX8-NEXT: s_mov_b32 s3, 0xf000 +; GFX8-NEXT: s_mov_b32 s2, -1 +; GFX8-NEXT: v_mov_b32_e32 v0, s4 +; GFX8-NEXT: buffer_store_dword v0, off, s[0:3], 0 ; GFX8-NEXT: s_endpgm entry: %0 = mul i16 %a, %b 
diff --git a/llvm/test/CodeGen/AMDGPU/max.ll b/llvm/test/CodeGen/AMDGPU/max.ll index ba532949a687d..c48e25f36e99f 100644 --- a/llvm/test/CodeGen/AMDGPU/max.ll +++ b/llvm/test/CodeGen/AMDGPU/max.ll @@ -155,14 +155,13 @@ define amdgpu_kernel void @s_test_imax_sge_i32(ptr addrspace(1) %out, i32 %a, i3 ; SI-LABEL: s_test_imax_sge_i32: ; SI: ; %bb.0: ; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9 -; SI-NEXT: s_mov_b32 s7, 0xf000 -; SI-NEXT: s_mov_b32 s6, -1 ; SI-NEXT: s_waitcnt lgkmcnt(0) -; SI-NEXT: s_mov_b32 s4, s0 -; SI-NEXT: s_max_i32 s0, s2, s3 -; SI-NEXT: s_mov_b32 s5, s1 -; SI-NEXT: v_mov_b32_e32 v0, s0 -; SI-NEXT: buffer_store_dword v0, off, s[4:7], 0 +; SI-NEXT: s_mov_b64 s[4:5], s[2:3] +; SI-NEXT: s_max_i32 s4, s4, s5 +; SI-NEXT: s_mov_b32 s3, 0xf000 +; SI-NEXT: s_mov_b32 s2, -1 +; SI-NEXT: v_mov_b32_e32 v0, s4 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0 ; SI-NEXT: s_endpgm ; ; GFX1250-LABEL: s_test_imax_sge_i32: @@ -357,16 +356,15 @@ define amdgpu_kernel void @s_test_imax_sgt_imm_v2i32(ptr addrspace(1) %out, <2 x ; SI-LABEL: s_test_imax_sgt_imm_v2i32: ; SI: ; %bb.0: ; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9 -; SI-NEXT: s_mov_b32 s7, 0xf000 -; SI-NEXT: s_mov_b32 s6, -1 ; SI-NEXT: s_waitcnt lgkmcnt(0) -; SI-NEXT: s_mov_b32 s4, s0 -; SI-NEXT: s_mov_b32 s5, s1 -; SI-NEXT: s_max_i32 s0, s3, 9 -; SI-NEXT: s_max_i32 s1, s2, 9 -; SI-NEXT: v_mov_b32_e32 v0, s1 -; SI-NEXT: v_mov_b32_e32 v1, s0 -; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0 +; SI-NEXT: s_mov_b64 s[4:5], s[2:3] +; SI-NEXT: s_max_i32 s5, s5, 9 +; SI-NEXT: s_max_i32 s4, s4, 9 +; SI-NEXT: s_mov_b32 s3, 0xf000 +; SI-NEXT: s_mov_b32 s2, -1 +; SI-NEXT: v_mov_b32_e32 v0, s4 +; SI-NEXT: v_mov_b32_e32 v1, s5 +; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0 ; SI-NEXT: s_endpgm ; ; GFX1250-LABEL: s_test_imax_sgt_imm_v2i32: @@ -472,14 +470,13 @@ define amdgpu_kernel void @s_test_imax_sgt_i32(ptr addrspace(1) %out, i32 %a, i3 ; SI-LABEL: s_test_imax_sgt_i32: ; SI: ; %bb.0: ; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9 -; SI-NEXT: s_mov_b32 s7, 0xf000 -; SI-NEXT: s_mov_b32 s6, -1 ; SI-NEXT: s_waitcnt lgkmcnt(0) -; SI-NEXT: s_mov_b32 s4, s0 -; SI-NEXT: s_max_i32 s0, s2, s3 -; SI-NEXT: s_mov_b32 s5, s1 -; SI-NEXT: v_mov_b32_e32 v0, s0 -; SI-NEXT: buffer_store_dword v0, off, s[4:7], 0 +; SI-NEXT: s_mov_b64 s[4:5], s[2:3] +; SI-NEXT: s_max_i32 s4, s4, s5 +; SI-NEXT: s_mov_b32 s3, 0xf000 +; SI-NEXT: s_mov_b32 s2, -1 +; SI-NEXT: v_mov_b32_e32 v0, s4 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0 ; SI-NEXT: s_endpgm ; ; GFX1250-LABEL: s_test_imax_sgt_i32: @@ -582,14 +579,13 @@ define amdgpu_kernel void @s_test_umax_uge_i32(ptr addrspace(1) %out, i32 %a, i3 ; SI-LABEL: s_test_umax_uge_i32: ; SI: ; %bb.0: ; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9 -; SI-NEXT: s_mov_b32 s7, 0xf000 -; SI-NEXT: s_mov_b32 s6, -1 ; SI-NEXT: s_waitcnt lgkmcnt(0) -; SI-NEXT: s_mov_b32 s4, s0 -; SI-NEXT: s_max_u32 s0, s2, s3 -; SI-NEXT: s_mov_b32 s5, s1 -; SI-NEXT: v_mov_b32_e32 v0, s0 -; SI-NEXT: buffer_store_dword v0, off, s[4:7], 0 +; SI-NEXT: s_mov_b64 s[4:5], s[2:3] +; SI-NEXT: s_max_u32 s4, s4, s5 +; SI-NEXT: s_mov_b32 s3, 0xf000 +; SI-NEXT: s_mov_b32 s2, -1 +; SI-NEXT: v_mov_b32_e32 v0, s4 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0 ; SI-NEXT: s_endpgm ; ; GFX1250-LABEL: s_test_umax_uge_i32: @@ -817,14 +813,13 @@ define amdgpu_kernel void @s_test_umax_ugt_i32(ptr addrspace(1) %out, i32 %a, i3 ; SI-LABEL: s_test_umax_ugt_i32: ; SI: ; %bb.0: ; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9 -; SI-NEXT: s_mov_b32 s7, 0xf000 -; SI-NEXT: 
s_mov_b32 s6, -1 ; SI-NEXT: s_waitcnt lgkmcnt(0) -; SI-NEXT: s_mov_b32 s4, s0 -; SI-NEXT: s_max_u32 s0, s2, s3 -; SI-NEXT: s_mov_b32 s5, s1 -; SI-NEXT: v_mov_b32_e32 v0, s0 -; SI-NEXT: buffer_store_dword v0, off, s[4:7], 0 +; SI-NEXT: s_mov_b64 s[4:5], s[2:3] +; SI-NEXT: s_max_u32 s4, s4, s5 +; SI-NEXT: s_mov_b32 s3, 0xf000 +; SI-NEXT: s_mov_b32 s2, -1 +; SI-NEXT: v_mov_b32_e32 v0, s4 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0 ; SI-NEXT: s_endpgm ; ; GFX1250-LABEL: s_test_umax_ugt_i32: @@ -858,16 +853,15 @@ define amdgpu_kernel void @s_test_umax_ugt_imm_v2i32(ptr addrspace(1) %out, <2 x ; SI-LABEL: s_test_umax_ugt_imm_v2i32: ; SI: ; %bb.0: ; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9 -; SI-NEXT: s_mov_b32 s7, 0xf000 -; SI-NEXT: s_mov_b32 s6, -1 ; SI-NEXT: s_waitcnt lgkmcnt(0) -; SI-NEXT: s_mov_b32 s4, s0 -; SI-NEXT: s_mov_b32 s5, s1 -; SI-NEXT: s_max_u32 s0, s3, 23 -; SI-NEXT: s_max_u32 s1, s2, 15 -; SI-NEXT: v_mov_b32_e32 v0, s1 -; SI-NEXT: v_mov_b32_e32 v1, s0 -; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0 +; SI-NEXT: s_mov_b64 s[4:5], s[2:3] +; SI-NEXT: s_max_u32 s5, s5, 23 +; SI-NEXT: s_max_u32 s4, s4, 15 +; SI-NEXT: s_mov_b32 s3, 0xf000 +; SI-NEXT: s_mov_b32 s2, -1 +; SI-NEXT: v_mov_b32_e32 v0, s4 +; SI-NEXT: v_mov_b32_e32 v1, s5 +; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0 ; SI-NEXT: s_endpgm ; ; GFX1250-LABEL: s_test_umax_ugt_imm_v2i32: diff --git a/llvm/test/CodeGen/AMDGPU/memcpy-crash-issue63986.ll b/llvm/test/CodeGen/AMDGPU/memcpy-crash-issue63986.ll index ca4f5d22ca9a0..43752c22b1f3e 100644 --- a/llvm/test/CodeGen/AMDGPU/memcpy-crash-issue63986.ll +++ b/llvm/test/CodeGen/AMDGPU/memcpy-crash-issue63986.ll @@ -90,18 +90,18 @@ define void @issue63986(i64 %0, i64 %idxprom, ptr inreg %ptr) { ; CHECK-NEXT: .LBB0_13: ; %loop-memcpy-expansion2 ; CHECK-NEXT: ; Parent Loop BB0_11 Depth=1 ; CHECK-NEXT: ; => This Inner Loop Header: Depth=2 -; CHECK-NEXT: v_mov_b32_e32 v6, s12 -; CHECK-NEXT: v_mov_b32_e32 v7, s13 +; CHECK-NEXT: v_mov_b32_e32 v6, s10 +; CHECK-NEXT: v_mov_b32_e32 v7, s11 ; CHECK-NEXT: flat_load_dwordx4 v[10:13], v[6:7] -; CHECK-NEXT: v_add_co_u32_e32 v6, vcc, s12, v8 -; CHECK-NEXT: s_add_u32 s12, s12, 16 +; CHECK-NEXT: v_add_co_u32_e32 v6, vcc, s10, v8 +; CHECK-NEXT: s_add_u32 s10, s10, 16 ; CHECK-NEXT: v_addc_co_u32_e32 v7, vcc, v9, v7, vcc -; CHECK-NEXT: s_addc_u32 s13, s13, 0 -; CHECK-NEXT: v_cmp_ge_u64_e32 vcc, s[12:13], v[0:1] -; CHECK-NEXT: s_or_b64 s[10:11], vcc, s[10:11] +; CHECK-NEXT: s_addc_u32 s11, s11, 0 +; CHECK-NEXT: v_cmp_ge_u64_e32 vcc, s[10:11], v[0:1] +; CHECK-NEXT: s_or_b64 s[12:13], vcc, s[12:13] ; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) ; CHECK-NEXT: flat_store_dwordx4 v[6:7], v[10:13] -; CHECK-NEXT: s_andn2_b64 exec, exec, s[10:11] +; CHECK-NEXT: s_andn2_b64 exec, exec, s[12:13] ; CHECK-NEXT: s_cbranch_execnz .LBB0_13 ; CHECK-NEXT: .LBB0_14: ; %Flow15 ; CHECK-NEXT: ; in Loop: Header=BB0_11 Depth=1 @@ -115,8 +115,8 @@ define void @issue63986(i64 %0, i64 %idxprom, ptr inreg %ptr) { ; CHECK-NEXT: s_cbranch_execz .LBB0_9 ; CHECK-NEXT: ; %bb.16: ; %loop-memcpy-residual4.preheader ; CHECK-NEXT: ; in Loop: Header=BB0_11 Depth=1 -; CHECK-NEXT: s_mov_b64 s[12:13], 0 ; CHECK-NEXT: s_mov_b64 s[14:15], 0 +; CHECK-NEXT: s_mov_b64 s[12:13], 0 ; CHECK-NEXT: .LBB0_17: ; %loop-memcpy-residual4 ; CHECK-NEXT: ; Parent Loop BB0_11 Depth=1 ; CHECK-NEXT: ; => This Inner Loop Header: Depth=2 diff --git a/llvm/test/CodeGen/AMDGPU/memmove-var-size.ll b/llvm/test/CodeGen/AMDGPU/memmove-var-size.ll index 14b0729b37302..953511db10b29 100644 --- 
a/llvm/test/CodeGen/AMDGPU/memmove-var-size.ll +++ b/llvm/test/CodeGen/AMDGPU/memmove-var-size.ll @@ -10,13 +10,13 @@ define void @memmove_p0_p0(ptr addrspace(0) align 1 %dst, ptr addrspace(0) align ; CHECK-LABEL: memmove_p0_p0: ; CHECK: ; %bb.0: ; %entry ; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; CHECK-NEXT: v_and_b32_e32 v8, 15, v4 -; CHECK-NEXT: v_mov_b32_e32 v9, 0 -; CHECK-NEXT: v_and_b32_e32 v6, -16, v4 -; CHECK-NEXT: v_mov_b32_e32 v7, v5 +; CHECK-NEXT: v_and_b32_e32 v6, 15, v4 +; CHECK-NEXT: v_mov_b32_e32 v7, 0 +; CHECK-NEXT: v_and_b32_e32 v8, -16, v4 +; CHECK-NEXT: v_mov_b32_e32 v9, v5 ; CHECK-NEXT: s_mov_b32 s6, exec_lo -; CHECK-NEXT: v_cmp_ne_u64_e64 s4, 0, v[8:9] -; CHECK-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7] +; CHECK-NEXT: v_cmp_ne_u64_e64 s4, 0, v[6:7] +; CHECK-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[8:9] ; CHECK-NEXT: v_cmpx_ge_u64_e64 v[2:3], v[0:1] ; CHECK-NEXT: s_xor_b32 s7, exec_lo, s6 ; CHECK-NEXT: s_cbranch_execnz .LBB0_3 @@ -33,10 +33,10 @@ define void @memmove_p0_p0(ptr addrspace(0) align 1 %dst, ptr addrspace(0) align ; CHECK-NEXT: ; %bb.4: ; %memmove_fwd_main_loop.preheader ; CHECK-NEXT: v_mov_b32_e32 v5, v3 ; CHECK-NEXT: v_mov_b32_e32 v11, v1 -; CHECK-NEXT: v_mov_b32_e32 v13, v7 +; CHECK-NEXT: v_mov_b32_e32 v13, v9 ; CHECK-NEXT: v_mov_b32_e32 v4, v2 ; CHECK-NEXT: v_mov_b32_e32 v10, v0 -; CHECK-NEXT: v_mov_b32_e32 v12, v6 +; CHECK-NEXT: v_mov_b32_e32 v12, v8 ; CHECK-NEXT: s_mov_b32 s9, 0 ; CHECK-NEXT: .p2align 6 ; CHECK-NEXT: .LBB0_5: ; %memmove_fwd_main_loop @@ -59,20 +59,20 @@ define void @memmove_p0_p0(ptr addrspace(0) align 1 %dst, ptr addrspace(0) align ; CHECK-NEXT: s_and_saveexec_b32 s8, s4 ; CHECK-NEXT: s_cbranch_execz .LBB0_9 ; CHECK-NEXT: ; %bb.7: ; %memmove_fwd_residual_loop.preheader -; CHECK-NEXT: v_add_co_u32 v0, s5, v0, v6 -; CHECK-NEXT: v_add_co_ci_u32_e64 v1, null, v1, v7, s5 -; CHECK-NEXT: v_add_co_u32 v2, s5, v2, v6 -; CHECK-NEXT: v_add_co_ci_u32_e64 v3, null, v3, v7, s5 +; CHECK-NEXT: v_add_co_u32 v0, s5, v0, v8 +; CHECK-NEXT: v_add_co_ci_u32_e64 v1, null, v1, v9, s5 +; CHECK-NEXT: v_add_co_u32 v2, s5, v2, v8 +; CHECK-NEXT: v_add_co_ci_u32_e64 v3, null, v3, v9, s5 ; CHECK-NEXT: s_mov_b32 s9, 0 ; CHECK-NEXT: .p2align 6 ; CHECK-NEXT: .LBB0_8: ; %memmove_fwd_residual_loop ; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1 ; CHECK-NEXT: flat_load_ubyte v4, v[2:3] -; CHECK-NEXT: v_add_co_u32 v8, s5, v8, -1 -; CHECK-NEXT: v_add_co_ci_u32_e64 v9, null, -1, v9, s5 +; CHECK-NEXT: v_add_co_u32 v6, s5, v6, -1 +; CHECK-NEXT: v_add_co_ci_u32_e64 v7, null, -1, v7, s5 ; CHECK-NEXT: v_add_co_u32 v2, s5, v2, 1 ; CHECK-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, s5 -; CHECK-NEXT: v_cmp_eq_u64_e64 s5, 0, v[8:9] +; CHECK-NEXT: v_cmp_eq_u64_e64 s5, 0, v[6:7] ; CHECK-NEXT: s_or_b32 s9, s5, s9 ; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) ; CHECK-NEXT: flat_store_byte v[0:1], v4 @@ -82,10 +82,10 @@ define void @memmove_p0_p0(ptr addrspace(0) align 1 %dst, ptr addrspace(0) align ; CHECK-NEXT: s_cbranch_execnz .LBB0_8 ; CHECK-NEXT: .LBB0_9: ; %Flow28 ; CHECK-NEXT: s_or_b32 exec_lo, exec_lo, s8 -; CHECK-NEXT: ; implicit-def: $vgpr6_vgpr7 +; CHECK-NEXT: ; implicit-def: $vgpr8_vgpr9 ; CHECK-NEXT: ; implicit-def: $vgpr0_vgpr1 ; CHECK-NEXT: ; implicit-def: $vgpr2_vgpr3 -; CHECK-NEXT: ; implicit-def: $vgpr8_vgpr9 +; CHECK-NEXT: ; implicit-def: $vgpr6_vgpr7 ; CHECK-NEXT: ; implicit-def: $vgpr4_vgpr5 ; CHECK-NEXT: s_andn2_saveexec_b32 s6, s7 ; CHECK-NEXT: s_cbranch_execz .LBB0_2 @@ -104,11 +104,11 @@ define void @memmove_p0_p0(ptr addrspace(0) align 1 %dst, ptr 
addrspace(0) align ; CHECK-NEXT: .LBB0_12: ; %memmove_bwd_residual_loop ; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1 ; CHECK-NEXT: flat_load_ubyte v12, v[10:11] -; CHECK-NEXT: v_add_co_u32 v8, s4, v8, -1 -; CHECK-NEXT: v_add_co_ci_u32_e64 v9, null, -1, v9, s4 +; CHECK-NEXT: v_add_co_u32 v6, s4, v6, -1 +; CHECK-NEXT: v_add_co_ci_u32_e64 v7, null, -1, v7, s4 ; CHECK-NEXT: v_add_co_u32 v10, s4, v10, -1 ; CHECK-NEXT: v_add_co_ci_u32_e64 v11, null, -1, v11, s4 -; CHECK-NEXT: v_cmp_eq_u64_e64 s4, 0, v[8:9] +; CHECK-NEXT: v_cmp_eq_u64_e64 s4, 0, v[6:7] ; CHECK-NEXT: s_or_b32 s8, s4, s8 ; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) ; CHECK-NEXT: flat_store_byte v[4:5], v12 @@ -129,19 +129,19 @@ define void @memmove_p0_p0(ptr addrspace(0) align 1 %dst, ptr addrspace(0) align ; CHECK-NEXT: .p2align 6 ; CHECK-NEXT: .LBB0_15: ; %memmove_bwd_main_loop ; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: v_add_co_u32 v4, vcc_lo, v2, v6 -; CHECK-NEXT: v_add_co_ci_u32_e64 v5, null, v3, v7, vcc_lo -; CHECK-NEXT: v_add_co_u32 v12, s4, v0, v6 -; CHECK-NEXT: v_add_co_ci_u32_e64 v13, null, v1, v7, s4 -; CHECK-NEXT: flat_load_dwordx4 v[8:11], v[4:5] -; CHECK-NEXT: v_add_co_u32 v4, vcc_lo, v6, -16 -; CHECK-NEXT: v_add_co_ci_u32_e64 v5, null, -1, v7, vcc_lo -; CHECK-NEXT: v_cmp_eq_u64_e32 vcc_lo, 0, v[4:5] -; CHECK-NEXT: v_mov_b32_e32 v7, v5 -; CHECK-NEXT: v_mov_b32_e32 v6, v4 +; CHECK-NEXT: v_mov_b32_e32 v11, v9 +; CHECK-NEXT: v_mov_b32_e32 v10, v8 +; CHECK-NEXT: v_add_co_u32 v4, vcc_lo, v2, v10 +; CHECK-NEXT: v_add_co_ci_u32_e64 v5, null, v3, v11, vcc_lo +; CHECK-NEXT: v_add_co_u32 v8, vcc_lo, v10, -16 +; CHECK-NEXT: v_add_co_ci_u32_e64 v9, null, -1, v11, vcc_lo +; CHECK-NEXT: flat_load_dwordx4 v[4:7], v[4:5] +; CHECK-NEXT: v_add_co_u32 v10, s4, v0, v10 +; CHECK-NEXT: v_cmp_eq_u64_e32 vcc_lo, 0, v[8:9] +; CHECK-NEXT: v_add_co_ci_u32_e64 v11, null, v1, v11, s4 ; CHECK-NEXT: s_or_b32 s7, vcc_lo, s7 ; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) -; CHECK-NEXT: flat_store_dwordx4 v[12:13], v[8:11] +; CHECK-NEXT: flat_store_dwordx4 v[10:11], v[4:7] ; CHECK-NEXT: s_andn2_b32 exec_lo, exec_lo, s7 ; CHECK-NEXT: s_cbranch_execnz .LBB0_15 ; CHECK-NEXT: .LBB0_16: ; %Flow32 @@ -158,13 +158,13 @@ define void @memmove_p0_p1(ptr addrspace(0) align 1 %dst, ptr addrspace(1) align ; CHECK-LABEL: memmove_p0_p1: ; CHECK: ; %bb.0: ; %entry ; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; CHECK-NEXT: v_and_b32_e32 v8, 15, v4 -; CHECK-NEXT: v_mov_b32_e32 v9, 0 -; CHECK-NEXT: v_and_b32_e32 v6, -16, v4 -; CHECK-NEXT: v_mov_b32_e32 v7, v5 +; CHECK-NEXT: v_and_b32_e32 v6, 15, v4 +; CHECK-NEXT: v_mov_b32_e32 v7, 0 +; CHECK-NEXT: v_and_b32_e32 v8, -16, v4 +; CHECK-NEXT: v_mov_b32_e32 v9, v5 ; CHECK-NEXT: s_mov_b32 s6, exec_lo -; CHECK-NEXT: v_cmp_ne_u64_e64 s4, 0, v[8:9] -; CHECK-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7] +; CHECK-NEXT: v_cmp_ne_u64_e64 s4, 0, v[6:7] +; CHECK-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[8:9] ; CHECK-NEXT: v_cmpx_ge_u64_e64 v[2:3], v[0:1] ; CHECK-NEXT: s_xor_b32 s7, exec_lo, s6 ; CHECK-NEXT: s_cbranch_execnz .LBB1_3 @@ -181,10 +181,10 @@ define void @memmove_p0_p1(ptr addrspace(0) align 1 %dst, ptr addrspace(1) align ; CHECK-NEXT: ; %bb.4: ; %memmove_fwd_main_loop.preheader ; CHECK-NEXT: v_mov_b32_e32 v5, v3 ; CHECK-NEXT: v_mov_b32_e32 v11, v1 -; CHECK-NEXT: v_mov_b32_e32 v13, v7 +; CHECK-NEXT: v_mov_b32_e32 v13, v9 ; CHECK-NEXT: v_mov_b32_e32 v4, v2 ; CHECK-NEXT: v_mov_b32_e32 v10, v0 -; CHECK-NEXT: v_mov_b32_e32 v12, v6 +; CHECK-NEXT: v_mov_b32_e32 v12, v8 ; CHECK-NEXT: s_mov_b32 s9, 0 ; 
CHECK-NEXT: .p2align 6 ; CHECK-NEXT: .LBB1_5: ; %memmove_fwd_main_loop @@ -207,20 +207,20 @@ define void @memmove_p0_p1(ptr addrspace(0) align 1 %dst, ptr addrspace(1) align ; CHECK-NEXT: s_and_saveexec_b32 s8, s4 ; CHECK-NEXT: s_cbranch_execz .LBB1_9 ; CHECK-NEXT: ; %bb.7: ; %memmove_fwd_residual_loop.preheader -; CHECK-NEXT: v_add_co_u32 v0, s5, v0, v6 -; CHECK-NEXT: v_add_co_ci_u32_e64 v1, null, v1, v7, s5 -; CHECK-NEXT: v_add_co_u32 v2, s5, v2, v6 -; CHECK-NEXT: v_add_co_ci_u32_e64 v3, null, v3, v7, s5 +; CHECK-NEXT: v_add_co_u32 v0, s5, v0, v8 +; CHECK-NEXT: v_add_co_ci_u32_e64 v1, null, v1, v9, s5 +; CHECK-NEXT: v_add_co_u32 v2, s5, v2, v8 +; CHECK-NEXT: v_add_co_ci_u32_e64 v3, null, v3, v9, s5 ; CHECK-NEXT: s_mov_b32 s9, 0 ; CHECK-NEXT: .p2align 6 ; CHECK-NEXT: .LBB1_8: ; %memmove_fwd_residual_loop ; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1 ; CHECK-NEXT: global_load_ubyte v4, v[2:3], off -; CHECK-NEXT: v_add_co_u32 v8, s5, v8, -1 -; CHECK-NEXT: v_add_co_ci_u32_e64 v9, null, -1, v9, s5 +; CHECK-NEXT: v_add_co_u32 v6, s5, v6, -1 +; CHECK-NEXT: v_add_co_ci_u32_e64 v7, null, -1, v7, s5 ; CHECK-NEXT: v_add_co_u32 v2, s5, v2, 1 ; CHECK-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, s5 -; CHECK-NEXT: v_cmp_eq_u64_e64 s5, 0, v[8:9] +; CHECK-NEXT: v_cmp_eq_u64_e64 s5, 0, v[6:7] ; CHECK-NEXT: s_or_b32 s9, s5, s9 ; CHECK-NEXT: s_waitcnt vmcnt(0) ; CHECK-NEXT: flat_store_byte v[0:1], v4 @@ -230,10 +230,10 @@ define void @memmove_p0_p1(ptr addrspace(0) align 1 %dst, ptr addrspace(1) align ; CHECK-NEXT: s_cbranch_execnz .LBB1_8 ; CHECK-NEXT: .LBB1_9: ; %Flow30 ; CHECK-NEXT: s_or_b32 exec_lo, exec_lo, s8 -; CHECK-NEXT: ; implicit-def: $vgpr6_vgpr7 +; CHECK-NEXT: ; implicit-def: $vgpr8_vgpr9 ; CHECK-NEXT: ; implicit-def: $vgpr2_vgpr3 ; CHECK-NEXT: ; implicit-def: $vgpr0_vgpr1 -; CHECK-NEXT: ; implicit-def: $vgpr8_vgpr9 +; CHECK-NEXT: ; implicit-def: $vgpr6_vgpr7 ; CHECK-NEXT: ; implicit-def: $vgpr4_vgpr5 ; CHECK-NEXT: s_andn2_saveexec_b32 s6, s7 ; CHECK-NEXT: s_cbranch_execz .LBB1_2 @@ -252,11 +252,11 @@ define void @memmove_p0_p1(ptr addrspace(0) align 1 %dst, ptr addrspace(1) align ; CHECK-NEXT: .LBB1_12: ; %memmove_bwd_residual_loop ; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1 ; CHECK-NEXT: global_load_ubyte v12, v[4:5], off -; CHECK-NEXT: v_add_co_u32 v8, s4, v8, -1 -; CHECK-NEXT: v_add_co_ci_u32_e64 v9, null, -1, v9, s4 +; CHECK-NEXT: v_add_co_u32 v6, s4, v6, -1 +; CHECK-NEXT: v_add_co_ci_u32_e64 v7, null, -1, v7, s4 ; CHECK-NEXT: v_add_co_u32 v4, s4, v4, -1 ; CHECK-NEXT: v_add_co_ci_u32_e64 v5, null, -1, v5, s4 -; CHECK-NEXT: v_cmp_eq_u64_e64 s4, 0, v[8:9] +; CHECK-NEXT: v_cmp_eq_u64_e64 s4, 0, v[6:7] ; CHECK-NEXT: s_or_b32 s8, s4, s8 ; CHECK-NEXT: s_waitcnt vmcnt(0) ; CHECK-NEXT: flat_store_byte v[10:11], v12 @@ -277,19 +277,19 @@ define void @memmove_p0_p1(ptr addrspace(0) align 1 %dst, ptr addrspace(1) align ; CHECK-NEXT: .p2align 6 ; CHECK-NEXT: .LBB1_15: ; %memmove_bwd_main_loop ; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: v_add_co_u32 v4, vcc_lo, v2, v6 -; CHECK-NEXT: v_add_co_ci_u32_e64 v5, null, v3, v7, vcc_lo -; CHECK-NEXT: v_add_co_u32 v12, s4, v0, v6 -; CHECK-NEXT: v_add_co_ci_u32_e64 v13, null, v1, v7, s4 -; CHECK-NEXT: global_load_dwordx4 v[8:11], v[4:5], off -; CHECK-NEXT: v_add_co_u32 v4, vcc_lo, v6, -16 -; CHECK-NEXT: v_add_co_ci_u32_e64 v5, null, -1, v7, vcc_lo -; CHECK-NEXT: v_cmp_eq_u64_e32 vcc_lo, 0, v[4:5] -; CHECK-NEXT: v_mov_b32_e32 v7, v5 -; CHECK-NEXT: v_mov_b32_e32 v6, v4 +; CHECK-NEXT: v_mov_b32_e32 v11, v9 +; CHECK-NEXT: v_mov_b32_e32 
v10, v8 +; CHECK-NEXT: v_add_co_u32 v4, vcc_lo, v2, v10 +; CHECK-NEXT: v_add_co_ci_u32_e64 v5, null, v3, v11, vcc_lo +; CHECK-NEXT: v_add_co_u32 v8, vcc_lo, v10, -16 +; CHECK-NEXT: v_add_co_ci_u32_e64 v9, null, -1, v11, vcc_lo +; CHECK-NEXT: global_load_dwordx4 v[4:7], v[4:5], off +; CHECK-NEXT: v_add_co_u32 v10, s4, v0, v10 +; CHECK-NEXT: v_cmp_eq_u64_e32 vcc_lo, 0, v[8:9] +; CHECK-NEXT: v_add_co_ci_u32_e64 v11, null, v1, v11, s4 ; CHECK-NEXT: s_or_b32 s7, vcc_lo, s7 ; CHECK-NEXT: s_waitcnt vmcnt(0) -; CHECK-NEXT: flat_store_dwordx4 v[12:13], v[8:11] +; CHECK-NEXT: flat_store_dwordx4 v[10:11], v[4:7] ; CHECK-NEXT: s_andn2_b32 exec_lo, exec_lo, s7 ; CHECK-NEXT: s_cbranch_execnz .LBB1_15 ; CHECK-NEXT: .LBB1_16: ; %Flow34 @@ -423,17 +423,17 @@ define void @memmove_p0_p3(ptr addrspace(0) align 1 %dst, ptr addrspace(3) align ; CHECK-NEXT: .LBB2_15: ; %memmove_bwd_main_loop ; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1 ; CHECK-NEXT: ds_read_b128 v[7:10], v2 -; CHECK-NEXT: v_add_co_u32 v3, vcc_lo, v5, -16 -; CHECK-NEXT: v_add_co_ci_u32_e64 v4, null, -1, v6, vcc_lo -; CHECK-NEXT: v_add_co_u32 v11, vcc_lo, v0, v5 -; CHECK-NEXT: v_add_co_ci_u32_e64 v12, null, v1, v6, vcc_lo -; CHECK-NEXT: v_cmp_eq_u64_e64 s4, 0, v[3:4] -; CHECK-NEXT: v_mov_b32_e32 v6, v4 +; CHECK-NEXT: v_mov_b32_e32 v3, v5 +; CHECK-NEXT: v_mov_b32_e32 v4, v6 ; CHECK-NEXT: v_add_nc_u32_e32 v2, -16, v2 -; CHECK-NEXT: v_mov_b32_e32 v5, v3 -; CHECK-NEXT: s_or_b32 s7, s4, s7 +; CHECK-NEXT: v_add_co_u32 v5, vcc_lo, v3, -16 +; CHECK-NEXT: v_add_co_ci_u32_e64 v6, null, -1, v4, vcc_lo +; CHECK-NEXT: v_add_co_u32 v3, s4, v0, v3 +; CHECK-NEXT: v_add_co_ci_u32_e64 v4, null, v1, v4, s4 +; CHECK-NEXT: v_cmp_eq_u64_e32 vcc_lo, 0, v[5:6] +; CHECK-NEXT: s_or_b32 s7, vcc_lo, s7 ; CHECK-NEXT: s_waitcnt lgkmcnt(0) -; CHECK-NEXT: flat_store_dwordx4 v[11:12], v[7:10] +; CHECK-NEXT: flat_store_dwordx4 v[3:4], v[7:10] ; CHECK-NEXT: s_andn2_b32 exec_lo, exec_lo, s7 ; CHECK-NEXT: s_cbranch_execnz .LBB2_15 ; CHECK-NEXT: .LBB2_16: ; %Flow36 @@ -450,13 +450,13 @@ define void @memmove_p0_p4(ptr addrspace(0) align 1 %dst, ptr addrspace(4) align ; CHECK-LABEL: memmove_p0_p4: ; CHECK: ; %bb.0: ; %entry ; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; CHECK-NEXT: v_and_b32_e32 v8, 15, v4 -; CHECK-NEXT: v_mov_b32_e32 v9, 0 -; CHECK-NEXT: v_and_b32_e32 v6, -16, v4 -; CHECK-NEXT: v_mov_b32_e32 v7, v5 +; CHECK-NEXT: v_and_b32_e32 v6, 15, v4 +; CHECK-NEXT: v_mov_b32_e32 v7, 0 +; CHECK-NEXT: v_and_b32_e32 v8, -16, v4 +; CHECK-NEXT: v_mov_b32_e32 v9, v5 ; CHECK-NEXT: s_mov_b32 s6, exec_lo -; CHECK-NEXT: v_cmp_ne_u64_e64 s4, 0, v[8:9] -; CHECK-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7] +; CHECK-NEXT: v_cmp_ne_u64_e64 s4, 0, v[6:7] +; CHECK-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[8:9] ; CHECK-NEXT: v_cmpx_ge_u64_e64 v[2:3], v[0:1] ; CHECK-NEXT: s_xor_b32 s7, exec_lo, s6 ; CHECK-NEXT: s_cbranch_execnz .LBB3_3 @@ -473,10 +473,10 @@ define void @memmove_p0_p4(ptr addrspace(0) align 1 %dst, ptr addrspace(4) align ; CHECK-NEXT: ; %bb.4: ; %memmove_fwd_main_loop.preheader ; CHECK-NEXT: v_mov_b32_e32 v5, v3 ; CHECK-NEXT: v_mov_b32_e32 v11, v1 -; CHECK-NEXT: v_mov_b32_e32 v13, v7 +; CHECK-NEXT: v_mov_b32_e32 v13, v9 ; CHECK-NEXT: v_mov_b32_e32 v4, v2 ; CHECK-NEXT: v_mov_b32_e32 v10, v0 -; CHECK-NEXT: v_mov_b32_e32 v12, v6 +; CHECK-NEXT: v_mov_b32_e32 v12, v8 ; CHECK-NEXT: s_mov_b32 s9, 0 ; CHECK-NEXT: .p2align 6 ; CHECK-NEXT: .LBB3_5: ; %memmove_fwd_main_loop @@ -499,20 +499,20 @@ define void @memmove_p0_p4(ptr addrspace(0) align 1 %dst, ptr addrspace(4) align ; CHECK-NEXT: 
s_and_saveexec_b32 s8, s4 ; CHECK-NEXT: s_cbranch_execz .LBB3_9 ; CHECK-NEXT: ; %bb.7: ; %memmove_fwd_residual_loop.preheader -; CHECK-NEXT: v_add_co_u32 v0, s5, v0, v6 -; CHECK-NEXT: v_add_co_ci_u32_e64 v1, null, v1, v7, s5 -; CHECK-NEXT: v_add_co_u32 v2, s5, v2, v6 -; CHECK-NEXT: v_add_co_ci_u32_e64 v3, null, v3, v7, s5 +; CHECK-NEXT: v_add_co_u32 v0, s5, v0, v8 +; CHECK-NEXT: v_add_co_ci_u32_e64 v1, null, v1, v9, s5 +; CHECK-NEXT: v_add_co_u32 v2, s5, v2, v8 +; CHECK-NEXT: v_add_co_ci_u32_e64 v3, null, v3, v9, s5 ; CHECK-NEXT: s_mov_b32 s9, 0 ; CHECK-NEXT: .p2align 6 ; CHECK-NEXT: .LBB3_8: ; %memmove_fwd_residual_loop ; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1 ; CHECK-NEXT: global_load_ubyte v4, v[2:3], off -; CHECK-NEXT: v_add_co_u32 v8, s5, v8, -1 -; CHECK-NEXT: v_add_co_ci_u32_e64 v9, null, -1, v9, s5 +; CHECK-NEXT: v_add_co_u32 v6, s5, v6, -1 +; CHECK-NEXT: v_add_co_ci_u32_e64 v7, null, -1, v7, s5 ; CHECK-NEXT: v_add_co_u32 v2, s5, v2, 1 ; CHECK-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, s5 -; CHECK-NEXT: v_cmp_eq_u64_e64 s5, 0, v[8:9] +; CHECK-NEXT: v_cmp_eq_u64_e64 s5, 0, v[6:7] ; CHECK-NEXT: s_or_b32 s9, s5, s9 ; CHECK-NEXT: s_waitcnt vmcnt(0) ; CHECK-NEXT: flat_store_byte v[0:1], v4 @@ -522,10 +522,10 @@ define void @memmove_p0_p4(ptr addrspace(0) align 1 %dst, ptr addrspace(4) align ; CHECK-NEXT: s_cbranch_execnz .LBB3_8 ; CHECK-NEXT: .LBB3_9: ; %Flow29 ; CHECK-NEXT: s_or_b32 exec_lo, exec_lo, s8 -; CHECK-NEXT: ; implicit-def: $vgpr6_vgpr7 +; CHECK-NEXT: ; implicit-def: $vgpr8_vgpr9 ; CHECK-NEXT: ; implicit-def: $vgpr2_vgpr3 ; CHECK-NEXT: ; implicit-def: $vgpr0_vgpr1 -; CHECK-NEXT: ; implicit-def: $vgpr8_vgpr9 +; CHECK-NEXT: ; implicit-def: $vgpr6_vgpr7 ; CHECK-NEXT: ; implicit-def: $vgpr4_vgpr5 ; CHECK-NEXT: s_andn2_saveexec_b32 s6, s7 ; CHECK-NEXT: s_cbranch_execz .LBB3_2 @@ -544,11 +544,11 @@ define void @memmove_p0_p4(ptr addrspace(0) align 1 %dst, ptr addrspace(4) align ; CHECK-NEXT: .LBB3_12: ; %memmove_bwd_residual_loop ; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1 ; CHECK-NEXT: global_load_ubyte v12, v[10:11], off -; CHECK-NEXT: v_add_co_u32 v8, s4, v8, -1 -; CHECK-NEXT: v_add_co_ci_u32_e64 v9, null, -1, v9, s4 +; CHECK-NEXT: v_add_co_u32 v6, s4, v6, -1 +; CHECK-NEXT: v_add_co_ci_u32_e64 v7, null, -1, v7, s4 ; CHECK-NEXT: v_add_co_u32 v10, s4, v10, -1 ; CHECK-NEXT: v_add_co_ci_u32_e64 v11, null, -1, v11, s4 -; CHECK-NEXT: v_cmp_eq_u64_e64 s4, 0, v[8:9] +; CHECK-NEXT: v_cmp_eq_u64_e64 s4, 0, v[6:7] ; CHECK-NEXT: s_or_b32 s8, s4, s8 ; CHECK-NEXT: s_waitcnt vmcnt(0) ; CHECK-NEXT: flat_store_byte v[4:5], v12 @@ -569,19 +569,19 @@ define void @memmove_p0_p4(ptr addrspace(0) align 1 %dst, ptr addrspace(4) align ; CHECK-NEXT: .p2align 6 ; CHECK-NEXT: .LBB3_15: ; %memmove_bwd_main_loop ; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: v_add_co_u32 v4, vcc_lo, v2, v6 -; CHECK-NEXT: v_add_co_ci_u32_e64 v5, null, v3, v7, vcc_lo -; CHECK-NEXT: v_add_co_u32 v12, s4, v0, v6 -; CHECK-NEXT: v_add_co_ci_u32_e64 v13, null, v1, v7, s4 -; CHECK-NEXT: global_load_dwordx4 v[8:11], v[4:5], off -; CHECK-NEXT: v_add_co_u32 v4, vcc_lo, v6, -16 -; CHECK-NEXT: v_add_co_ci_u32_e64 v5, null, -1, v7, vcc_lo -; CHECK-NEXT: v_cmp_eq_u64_e32 vcc_lo, 0, v[4:5] -; CHECK-NEXT: v_mov_b32_e32 v7, v5 -; CHECK-NEXT: v_mov_b32_e32 v6, v4 +; CHECK-NEXT: v_mov_b32_e32 v11, v9 +; CHECK-NEXT: v_mov_b32_e32 v10, v8 +; CHECK-NEXT: v_add_co_u32 v4, vcc_lo, v2, v10 +; CHECK-NEXT: v_add_co_ci_u32_e64 v5, null, v3, v11, vcc_lo +; CHECK-NEXT: v_add_co_u32 v8, vcc_lo, v10, -16 +; CHECK-NEXT: 
v_add_co_ci_u32_e64 v9, null, -1, v11, vcc_lo +; CHECK-NEXT: global_load_dwordx4 v[4:7], v[4:5], off +; CHECK-NEXT: v_add_co_u32 v10, s4, v0, v10 +; CHECK-NEXT: v_cmp_eq_u64_e32 vcc_lo, 0, v[8:9] +; CHECK-NEXT: v_add_co_ci_u32_e64 v11, null, v1, v11, s4 ; CHECK-NEXT: s_or_b32 s7, vcc_lo, s7 ; CHECK-NEXT: s_waitcnt vmcnt(0) -; CHECK-NEXT: flat_store_dwordx4 v[12:13], v[8:11] +; CHECK-NEXT: flat_store_dwordx4 v[10:11], v[4:7] ; CHECK-NEXT: s_andn2_b32 exec_lo, exec_lo, s7 ; CHECK-NEXT: s_cbranch_execnz .LBB3_15 ; CHECK-NEXT: .LBB3_16: ; %Flow33 @@ -723,17 +723,17 @@ define void @memmove_p0_p5(ptr addrspace(0) align 1 %dst, ptr addrspace(5) align ; CHECK-NEXT: buffer_load_dword v8, v2, s[0:3], 0 offen offset:4 ; CHECK-NEXT: buffer_load_dword v9, v2, s[0:3], 0 offen offset:8 ; CHECK-NEXT: buffer_load_dword v10, v2, s[0:3], 0 offen offset:12 -; CHECK-NEXT: v_add_co_u32 v3, vcc_lo, v5, -16 -; CHECK-NEXT: v_add_co_ci_u32_e64 v4, null, -1, v6, vcc_lo -; CHECK-NEXT: v_add_co_u32 v11, vcc_lo, v0, v5 -; CHECK-NEXT: v_add_co_ci_u32_e64 v12, null, v1, v6, vcc_lo -; CHECK-NEXT: v_cmp_eq_u64_e64 s4, 0, v[3:4] -; CHECK-NEXT: v_mov_b32_e32 v6, v4 +; CHECK-NEXT: v_mov_b32_e32 v3, v5 +; CHECK-NEXT: v_mov_b32_e32 v4, v6 ; CHECK-NEXT: v_add_nc_u32_e32 v2, -16, v2 -; CHECK-NEXT: v_mov_b32_e32 v5, v3 -; CHECK-NEXT: s_or_b32 s7, s4, s7 +; CHECK-NEXT: v_add_co_u32 v5, vcc_lo, v3, -16 +; CHECK-NEXT: v_add_co_ci_u32_e64 v6, null, -1, v4, vcc_lo +; CHECK-NEXT: v_add_co_u32 v3, s4, v0, v3 +; CHECK-NEXT: v_add_co_ci_u32_e64 v4, null, v1, v4, s4 +; CHECK-NEXT: v_cmp_eq_u64_e32 vcc_lo, 0, v[5:6] +; CHECK-NEXT: s_or_b32 s7, vcc_lo, s7 ; CHECK-NEXT: s_waitcnt vmcnt(0) -; CHECK-NEXT: flat_store_dwordx4 v[11:12], v[7:10] +; CHECK-NEXT: flat_store_dwordx4 v[3:4], v[7:10] ; CHECK-NEXT: s_andn2_b32 exec_lo, exec_lo, s7 ; CHECK-NEXT: s_cbranch_execnz .LBB4_15 ; CHECK-NEXT: .LBB4_16: ; %Flow36 @@ -751,13 +751,13 @@ define void @memmove_p1_p0(ptr addrspace(1) align 1 %dst, ptr addrspace(0) align ; CHECK-LABEL: memmove_p1_p0: ; CHECK: ; %bb.0: ; %entry ; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; CHECK-NEXT: v_and_b32_e32 v8, 15, v4 -; CHECK-NEXT: v_mov_b32_e32 v9, 0 -; CHECK-NEXT: v_and_b32_e32 v6, -16, v4 -; CHECK-NEXT: v_mov_b32_e32 v7, v5 +; CHECK-NEXT: v_and_b32_e32 v6, 15, v4 +; CHECK-NEXT: v_mov_b32_e32 v7, 0 +; CHECK-NEXT: v_and_b32_e32 v8, -16, v4 +; CHECK-NEXT: v_mov_b32_e32 v9, v5 ; CHECK-NEXT: s_mov_b32 s6, exec_lo -; CHECK-NEXT: v_cmp_ne_u64_e64 s4, 0, v[8:9] -; CHECK-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7] +; CHECK-NEXT: v_cmp_ne_u64_e64 s4, 0, v[6:7] +; CHECK-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[8:9] ; CHECK-NEXT: v_cmpx_ge_u64_e64 v[2:3], v[0:1] ; CHECK-NEXT: s_xor_b32 s7, exec_lo, s6 ; CHECK-NEXT: s_cbranch_execnz .LBB5_3 @@ -773,10 +773,10 @@ define void @memmove_p1_p0(ptr addrspace(1) align 1 %dst, ptr addrspace(0) align ; CHECK-NEXT: ; %bb.4: ; %memmove_fwd_main_loop.preheader ; CHECK-NEXT: v_mov_b32_e32 v5, v3 ; CHECK-NEXT: v_mov_b32_e32 v11, v1 -; CHECK-NEXT: v_mov_b32_e32 v13, v7 +; CHECK-NEXT: v_mov_b32_e32 v13, v9 ; CHECK-NEXT: v_mov_b32_e32 v4, v2 ; CHECK-NEXT: v_mov_b32_e32 v10, v0 -; CHECK-NEXT: v_mov_b32_e32 v12, v6 +; CHECK-NEXT: v_mov_b32_e32 v12, v8 ; CHECK-NEXT: s_mov_b32 s9, 0 ; CHECK-NEXT: .p2align 6 ; CHECK-NEXT: .LBB5_5: ; %memmove_fwd_main_loop @@ -799,20 +799,20 @@ define void @memmove_p1_p0(ptr addrspace(1) align 1 %dst, ptr addrspace(0) align ; CHECK-NEXT: s_and_saveexec_b32 s8, s4 ; CHECK-NEXT: s_cbranch_execz .LBB5_9 ; CHECK-NEXT: ; %bb.7: ; %memmove_fwd_residual_loop.preheader 
-; CHECK-NEXT: v_add_co_u32 v0, s5, v0, v6 -; CHECK-NEXT: v_add_co_ci_u32_e64 v1, null, v1, v7, s5 -; CHECK-NEXT: v_add_co_u32 v2, s5, v2, v6 -; CHECK-NEXT: v_add_co_ci_u32_e64 v3, null, v3, v7, s5 +; CHECK-NEXT: v_add_co_u32 v0, s5, v0, v8 +; CHECK-NEXT: v_add_co_ci_u32_e64 v1, null, v1, v9, s5 +; CHECK-NEXT: v_add_co_u32 v2, s5, v2, v8 +; CHECK-NEXT: v_add_co_ci_u32_e64 v3, null, v3, v9, s5 ; CHECK-NEXT: s_mov_b32 s9, 0 ; CHECK-NEXT: .p2align 6 ; CHECK-NEXT: .LBB5_8: ; %memmove_fwd_residual_loop ; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1 ; CHECK-NEXT: flat_load_ubyte v4, v[2:3] -; CHECK-NEXT: v_add_co_u32 v8, s5, v8, -1 -; CHECK-NEXT: v_add_co_ci_u32_e64 v9, null, -1, v9, s5 +; CHECK-NEXT: v_add_co_u32 v6, s5, v6, -1 +; CHECK-NEXT: v_add_co_ci_u32_e64 v7, null, -1, v7, s5 ; CHECK-NEXT: v_add_co_u32 v2, s5, v2, 1 ; CHECK-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, s5 -; CHECK-NEXT: v_cmp_eq_u64_e64 s5, 0, v[8:9] +; CHECK-NEXT: v_cmp_eq_u64_e64 s5, 0, v[6:7] ; CHECK-NEXT: s_or_b32 s9, s5, s9 ; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) ; CHECK-NEXT: global_store_byte v[0:1], v4, off @@ -822,10 +822,10 @@ define void @memmove_p1_p0(ptr addrspace(1) align 1 %dst, ptr addrspace(0) align ; CHECK-NEXT: s_cbranch_execnz .LBB5_8 ; CHECK-NEXT: .LBB5_9: ; %Flow30 ; CHECK-NEXT: s_or_b32 exec_lo, exec_lo, s8 -; CHECK-NEXT: ; implicit-def: $vgpr6_vgpr7 +; CHECK-NEXT: ; implicit-def: $vgpr8_vgpr9 ; CHECK-NEXT: ; implicit-def: $vgpr0_vgpr1 ; CHECK-NEXT: ; implicit-def: $vgpr2_vgpr3 -; CHECK-NEXT: ; implicit-def: $vgpr8_vgpr9 +; CHECK-NEXT: ; implicit-def: $vgpr6_vgpr7 ; CHECK-NEXT: ; implicit-def: $vgpr4_vgpr5 ; CHECK-NEXT: s_andn2_saveexec_b32 s6, s7 ; CHECK-NEXT: s_cbranch_execz .LBB5_2 @@ -844,11 +844,11 @@ define void @memmove_p1_p0(ptr addrspace(1) align 1 %dst, ptr addrspace(0) align ; CHECK-NEXT: .LBB5_12: ; %memmove_bwd_residual_loop ; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1 ; CHECK-NEXT: flat_load_ubyte v12, v[10:11] -; CHECK-NEXT: v_add_co_u32 v8, s4, v8, -1 -; CHECK-NEXT: v_add_co_ci_u32_e64 v9, null, -1, v9, s4 +; CHECK-NEXT: v_add_co_u32 v6, s4, v6, -1 +; CHECK-NEXT: v_add_co_ci_u32_e64 v7, null, -1, v7, s4 ; CHECK-NEXT: v_add_co_u32 v10, s4, v10, -1 ; CHECK-NEXT: v_add_co_ci_u32_e64 v11, null, -1, v11, s4 -; CHECK-NEXT: v_cmp_eq_u64_e64 s4, 0, v[8:9] +; CHECK-NEXT: v_cmp_eq_u64_e64 s4, 0, v[6:7] ; CHECK-NEXT: s_or_b32 s8, s4, s8 ; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) ; CHECK-NEXT: global_store_byte v[4:5], v12, off @@ -869,19 +869,19 @@ define void @memmove_p1_p0(ptr addrspace(1) align 1 %dst, ptr addrspace(0) align ; CHECK-NEXT: .p2align 6 ; CHECK-NEXT: .LBB5_15: ; %memmove_bwd_main_loop ; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: v_add_co_u32 v4, vcc_lo, v2, v6 -; CHECK-NEXT: v_add_co_ci_u32_e64 v5, null, v3, v7, vcc_lo -; CHECK-NEXT: v_add_co_u32 v12, s4, v0, v6 -; CHECK-NEXT: v_add_co_ci_u32_e64 v13, null, v1, v7, s4 -; CHECK-NEXT: flat_load_dwordx4 v[8:11], v[4:5] -; CHECK-NEXT: v_add_co_u32 v4, vcc_lo, v6, -16 -; CHECK-NEXT: v_add_co_ci_u32_e64 v5, null, -1, v7, vcc_lo -; CHECK-NEXT: v_cmp_eq_u64_e32 vcc_lo, 0, v[4:5] -; CHECK-NEXT: v_mov_b32_e32 v7, v5 -; CHECK-NEXT: v_mov_b32_e32 v6, v4 +; CHECK-NEXT: v_mov_b32_e32 v11, v9 +; CHECK-NEXT: v_mov_b32_e32 v10, v8 +; CHECK-NEXT: v_add_co_u32 v4, vcc_lo, v2, v10 +; CHECK-NEXT: v_add_co_ci_u32_e64 v5, null, v3, v11, vcc_lo +; CHECK-NEXT: v_add_co_u32 v8, vcc_lo, v10, -16 +; CHECK-NEXT: v_add_co_ci_u32_e64 v9, null, -1, v11, vcc_lo +; CHECK-NEXT: flat_load_dwordx4 v[4:7], v[4:5] +; CHECK-NEXT: 
v_add_co_u32 v10, s4, v0, v10 +; CHECK-NEXT: v_cmp_eq_u64_e32 vcc_lo, 0, v[8:9] +; CHECK-NEXT: v_add_co_ci_u32_e64 v11, null, v1, v11, s4 ; CHECK-NEXT: s_or_b32 s7, vcc_lo, s7 ; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) -; CHECK-NEXT: global_store_dwordx4 v[12:13], v[8:11], off +; CHECK-NEXT: global_store_dwordx4 v[10:11], v[4:7], off ; CHECK-NEXT: s_andn2_b32 exec_lo, exec_lo, s7 ; CHECK-NEXT: s_cbranch_execnz .LBB5_15 ; CHECK-NEXT: .LBB5_16: ; %Flow34 @@ -897,13 +897,13 @@ define void @memmove_p1_p1(ptr addrspace(1) align 1 %dst, ptr addrspace(1) align ; CHECK-LABEL: memmove_p1_p1: ; CHECK: ; %bb.0: ; %entry ; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; CHECK-NEXT: v_and_b32_e32 v8, 15, v4 -; CHECK-NEXT: v_mov_b32_e32 v9, 0 -; CHECK-NEXT: v_and_b32_e32 v6, -16, v4 -; CHECK-NEXT: v_mov_b32_e32 v7, v5 +; CHECK-NEXT: v_and_b32_e32 v6, 15, v4 +; CHECK-NEXT: v_mov_b32_e32 v7, 0 +; CHECK-NEXT: v_and_b32_e32 v8, -16, v4 +; CHECK-NEXT: v_mov_b32_e32 v9, v5 ; CHECK-NEXT: s_mov_b32 s6, exec_lo -; CHECK-NEXT: v_cmp_ne_u64_e64 s4, 0, v[8:9] -; CHECK-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7] +; CHECK-NEXT: v_cmp_ne_u64_e64 s4, 0, v[6:7] +; CHECK-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[8:9] ; CHECK-NEXT: v_cmpx_ge_u64_e64 v[2:3], v[0:1] ; CHECK-NEXT: s_xor_b32 s7, exec_lo, s6 ; CHECK-NEXT: s_cbranch_execnz .LBB6_3 @@ -919,10 +919,10 @@ define void @memmove_p1_p1(ptr addrspace(1) align 1 %dst, ptr addrspace(1) align ; CHECK-NEXT: ; %bb.4: ; %memmove_fwd_main_loop.preheader ; CHECK-NEXT: v_mov_b32_e32 v5, v3 ; CHECK-NEXT: v_mov_b32_e32 v11, v1 -; CHECK-NEXT: v_mov_b32_e32 v13, v7 +; CHECK-NEXT: v_mov_b32_e32 v13, v9 ; CHECK-NEXT: v_mov_b32_e32 v4, v2 ; CHECK-NEXT: v_mov_b32_e32 v10, v0 -; CHECK-NEXT: v_mov_b32_e32 v12, v6 +; CHECK-NEXT: v_mov_b32_e32 v12, v8 ; CHECK-NEXT: s_mov_b32 s9, 0 ; CHECK-NEXT: .p2align 6 ; CHECK-NEXT: .LBB6_5: ; %memmove_fwd_main_loop @@ -945,20 +945,20 @@ define void @memmove_p1_p1(ptr addrspace(1) align 1 %dst, ptr addrspace(1) align ; CHECK-NEXT: s_and_saveexec_b32 s8, s4 ; CHECK-NEXT: s_cbranch_execz .LBB6_9 ; CHECK-NEXT: ; %bb.7: ; %memmove_fwd_residual_loop.preheader -; CHECK-NEXT: v_add_co_u32 v0, s5, v0, v6 -; CHECK-NEXT: v_add_co_ci_u32_e64 v1, null, v1, v7, s5 -; CHECK-NEXT: v_add_co_u32 v2, s5, v2, v6 -; CHECK-NEXT: v_add_co_ci_u32_e64 v3, null, v3, v7, s5 +; CHECK-NEXT: v_add_co_u32 v0, s5, v0, v8 +; CHECK-NEXT: v_add_co_ci_u32_e64 v1, null, v1, v9, s5 +; CHECK-NEXT: v_add_co_u32 v2, s5, v2, v8 +; CHECK-NEXT: v_add_co_ci_u32_e64 v3, null, v3, v9, s5 ; CHECK-NEXT: s_mov_b32 s9, 0 ; CHECK-NEXT: .p2align 6 ; CHECK-NEXT: .LBB6_8: ; %memmove_fwd_residual_loop ; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1 ; CHECK-NEXT: global_load_ubyte v4, v[2:3], off -; CHECK-NEXT: v_add_co_u32 v8, s5, v8, -1 -; CHECK-NEXT: v_add_co_ci_u32_e64 v9, null, -1, v9, s5 +; CHECK-NEXT: v_add_co_u32 v6, s5, v6, -1 +; CHECK-NEXT: v_add_co_ci_u32_e64 v7, null, -1, v7, s5 ; CHECK-NEXT: v_add_co_u32 v2, s5, v2, 1 ; CHECK-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, s5 -; CHECK-NEXT: v_cmp_eq_u64_e64 s5, 0, v[8:9] +; CHECK-NEXT: v_cmp_eq_u64_e64 s5, 0, v[6:7] ; CHECK-NEXT: s_or_b32 s9, s5, s9 ; CHECK-NEXT: s_waitcnt vmcnt(0) ; CHECK-NEXT: global_store_byte v[0:1], v4, off @@ -968,10 +968,10 @@ define void @memmove_p1_p1(ptr addrspace(1) align 1 %dst, ptr addrspace(1) align ; CHECK-NEXT: s_cbranch_execnz .LBB6_8 ; CHECK-NEXT: .LBB6_9: ; %Flow32 ; CHECK-NEXT: s_or_b32 exec_lo, exec_lo, s8 -; CHECK-NEXT: ; implicit-def: $vgpr6_vgpr7 +; CHECK-NEXT: ; implicit-def: $vgpr8_vgpr9 ; CHECK-NEXT: ; 
implicit-def: $vgpr0_vgpr1 ; CHECK-NEXT: ; implicit-def: $vgpr2_vgpr3 -; CHECK-NEXT: ; implicit-def: $vgpr8_vgpr9 +; CHECK-NEXT: ; implicit-def: $vgpr6_vgpr7 ; CHECK-NEXT: ; implicit-def: $vgpr4_vgpr5 ; CHECK-NEXT: s_andn2_saveexec_b32 s6, s7 ; CHECK-NEXT: s_cbranch_execz .LBB6_2 @@ -990,11 +990,11 @@ define void @memmove_p1_p1(ptr addrspace(1) align 1 %dst, ptr addrspace(1) align ; CHECK-NEXT: .LBB6_12: ; %memmove_bwd_residual_loop ; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1 ; CHECK-NEXT: global_load_ubyte v12, v[10:11], off -; CHECK-NEXT: v_add_co_u32 v8, s4, v8, -1 -; CHECK-NEXT: v_add_co_ci_u32_e64 v9, null, -1, v9, s4 +; CHECK-NEXT: v_add_co_u32 v6, s4, v6, -1 +; CHECK-NEXT: v_add_co_ci_u32_e64 v7, null, -1, v7, s4 ; CHECK-NEXT: v_add_co_u32 v10, s4, v10, -1 ; CHECK-NEXT: v_add_co_ci_u32_e64 v11, null, -1, v11, s4 -; CHECK-NEXT: v_cmp_eq_u64_e64 s4, 0, v[8:9] +; CHECK-NEXT: v_cmp_eq_u64_e64 s4, 0, v[6:7] ; CHECK-NEXT: s_or_b32 s8, s4, s8 ; CHECK-NEXT: s_waitcnt vmcnt(0) ; CHECK-NEXT: global_store_byte v[4:5], v12, off @@ -1015,19 +1015,19 @@ define void @memmove_p1_p1(ptr addrspace(1) align 1 %dst, ptr addrspace(1) align ; CHECK-NEXT: .p2align 6 ; CHECK-NEXT: .LBB6_15: ; %memmove_bwd_main_loop ; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: v_add_co_u32 v4, vcc_lo, v2, v6 -; CHECK-NEXT: v_add_co_ci_u32_e64 v5, null, v3, v7, vcc_lo -; CHECK-NEXT: v_add_co_u32 v12, s4, v0, v6 -; CHECK-NEXT: v_add_co_ci_u32_e64 v13, null, v1, v7, s4 -; CHECK-NEXT: global_load_dwordx4 v[8:11], v[4:5], off -; CHECK-NEXT: v_add_co_u32 v4, vcc_lo, v6, -16 -; CHECK-NEXT: v_add_co_ci_u32_e64 v5, null, -1, v7, vcc_lo -; CHECK-NEXT: v_cmp_eq_u64_e32 vcc_lo, 0, v[4:5] -; CHECK-NEXT: v_mov_b32_e32 v7, v5 -; CHECK-NEXT: v_mov_b32_e32 v6, v4 +; CHECK-NEXT: v_mov_b32_e32 v11, v9 +; CHECK-NEXT: v_mov_b32_e32 v10, v8 +; CHECK-NEXT: v_add_co_u32 v4, vcc_lo, v2, v10 +; CHECK-NEXT: v_add_co_ci_u32_e64 v5, null, v3, v11, vcc_lo +; CHECK-NEXT: v_add_co_u32 v8, vcc_lo, v10, -16 +; CHECK-NEXT: v_add_co_ci_u32_e64 v9, null, -1, v11, vcc_lo +; CHECK-NEXT: global_load_dwordx4 v[4:7], v[4:5], off +; CHECK-NEXT: v_add_co_u32 v10, s4, v0, v10 +; CHECK-NEXT: v_cmp_eq_u64_e32 vcc_lo, 0, v[8:9] +; CHECK-NEXT: v_add_co_ci_u32_e64 v11, null, v1, v11, s4 ; CHECK-NEXT: s_or_b32 s7, vcc_lo, s7 ; CHECK-NEXT: s_waitcnt vmcnt(0) -; CHECK-NEXT: global_store_dwordx4 v[12:13], v[8:11], off +; CHECK-NEXT: global_store_dwordx4 v[10:11], v[4:7], off ; CHECK-NEXT: s_andn2_b32 exec_lo, exec_lo, s7 ; CHECK-NEXT: s_cbranch_execnz .LBB6_15 ; CHECK-NEXT: .LBB6_16: ; %Flow36 @@ -1109,13 +1109,13 @@ define void @memmove_p1_p4(ptr addrspace(1) align 1 %dst, ptr addrspace(4) align ; CHECK-LABEL: memmove_p1_p4: ; CHECK: ; %bb.0: ; %entry ; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; CHECK-NEXT: v_and_b32_e32 v8, 15, v4 -; CHECK-NEXT: v_mov_b32_e32 v9, 0 -; CHECK-NEXT: v_and_b32_e32 v6, -16, v4 -; CHECK-NEXT: v_mov_b32_e32 v7, v5 +; CHECK-NEXT: v_and_b32_e32 v6, 15, v4 +; CHECK-NEXT: v_mov_b32_e32 v7, 0 +; CHECK-NEXT: v_and_b32_e32 v8, -16, v4 +; CHECK-NEXT: v_mov_b32_e32 v9, v5 ; CHECK-NEXT: s_mov_b32 s6, exec_lo -; CHECK-NEXT: v_cmp_ne_u64_e64 s4, 0, v[8:9] -; CHECK-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7] +; CHECK-NEXT: v_cmp_ne_u64_e64 s4, 0, v[6:7] +; CHECK-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[8:9] ; CHECK-NEXT: v_cmpx_ge_u64_e64 v[2:3], v[0:1] ; CHECK-NEXT: s_xor_b32 s7, exec_lo, s6 ; CHECK-NEXT: s_cbranch_execnz .LBB8_3 @@ -1131,10 +1131,10 @@ define void @memmove_p1_p4(ptr addrspace(1) align 1 %dst, ptr addrspace(4) 
align ; CHECK-NEXT: ; %bb.4: ; %memmove_fwd_main_loop.preheader ; CHECK-NEXT: v_mov_b32_e32 v5, v3 ; CHECK-NEXT: v_mov_b32_e32 v11, v1 -; CHECK-NEXT: v_mov_b32_e32 v13, v7 +; CHECK-NEXT: v_mov_b32_e32 v13, v9 ; CHECK-NEXT: v_mov_b32_e32 v4, v2 ; CHECK-NEXT: v_mov_b32_e32 v10, v0 -; CHECK-NEXT: v_mov_b32_e32 v12, v6 +; CHECK-NEXT: v_mov_b32_e32 v12, v8 ; CHECK-NEXT: s_mov_b32 s9, 0 ; CHECK-NEXT: .p2align 6 ; CHECK-NEXT: .LBB8_5: ; %memmove_fwd_main_loop @@ -1157,20 +1157,20 @@ define void @memmove_p1_p4(ptr addrspace(1) align 1 %dst, ptr addrspace(4) align ; CHECK-NEXT: s_and_saveexec_b32 s8, s4 ; CHECK-NEXT: s_cbranch_execz .LBB8_9 ; CHECK-NEXT: ; %bb.7: ; %memmove_fwd_residual_loop.preheader -; CHECK-NEXT: v_add_co_u32 v0, s5, v0, v6 -; CHECK-NEXT: v_add_co_ci_u32_e64 v1, null, v1, v7, s5 -; CHECK-NEXT: v_add_co_u32 v2, s5, v2, v6 -; CHECK-NEXT: v_add_co_ci_u32_e64 v3, null, v3, v7, s5 +; CHECK-NEXT: v_add_co_u32 v0, s5, v0, v8 +; CHECK-NEXT: v_add_co_ci_u32_e64 v1, null, v1, v9, s5 +; CHECK-NEXT: v_add_co_u32 v2, s5, v2, v8 +; CHECK-NEXT: v_add_co_ci_u32_e64 v3, null, v3, v9, s5 ; CHECK-NEXT: s_mov_b32 s9, 0 ; CHECK-NEXT: .p2align 6 ; CHECK-NEXT: .LBB8_8: ; %memmove_fwd_residual_loop ; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1 ; CHECK-NEXT: global_load_ubyte v4, v[2:3], off -; CHECK-NEXT: v_add_co_u32 v8, s5, v8, -1 -; CHECK-NEXT: v_add_co_ci_u32_e64 v9, null, -1, v9, s5 +; CHECK-NEXT: v_add_co_u32 v6, s5, v6, -1 +; CHECK-NEXT: v_add_co_ci_u32_e64 v7, null, -1, v7, s5 ; CHECK-NEXT: v_add_co_u32 v2, s5, v2, 1 ; CHECK-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, s5 -; CHECK-NEXT: v_cmp_eq_u64_e64 s5, 0, v[8:9] +; CHECK-NEXT: v_cmp_eq_u64_e64 s5, 0, v[6:7] ; CHECK-NEXT: s_or_b32 s9, s5, s9 ; CHECK-NEXT: s_waitcnt vmcnt(0) ; CHECK-NEXT: global_store_byte v[0:1], v4, off @@ -1180,10 +1180,10 @@ define void @memmove_p1_p4(ptr addrspace(1) align 1 %dst, ptr addrspace(4) align ; CHECK-NEXT: s_cbranch_execnz .LBB8_8 ; CHECK-NEXT: .LBB8_9: ; %Flow31 ; CHECK-NEXT: s_or_b32 exec_lo, exec_lo, s8 -; CHECK-NEXT: ; implicit-def: $vgpr6_vgpr7 +; CHECK-NEXT: ; implicit-def: $vgpr8_vgpr9 ; CHECK-NEXT: ; implicit-def: $vgpr0_vgpr1 ; CHECK-NEXT: ; implicit-def: $vgpr2_vgpr3 -; CHECK-NEXT: ; implicit-def: $vgpr8_vgpr9 +; CHECK-NEXT: ; implicit-def: $vgpr6_vgpr7 ; CHECK-NEXT: ; implicit-def: $vgpr4_vgpr5 ; CHECK-NEXT: s_andn2_saveexec_b32 s6, s7 ; CHECK-NEXT: s_cbranch_execz .LBB8_2 @@ -1202,11 +1202,11 @@ define void @memmove_p1_p4(ptr addrspace(1) align 1 %dst, ptr addrspace(4) align ; CHECK-NEXT: .LBB8_12: ; %memmove_bwd_residual_loop ; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1 ; CHECK-NEXT: global_load_ubyte v12, v[10:11], off -; CHECK-NEXT: v_add_co_u32 v8, s4, v8, -1 -; CHECK-NEXT: v_add_co_ci_u32_e64 v9, null, -1, v9, s4 +; CHECK-NEXT: v_add_co_u32 v6, s4, v6, -1 +; CHECK-NEXT: v_add_co_ci_u32_e64 v7, null, -1, v7, s4 ; CHECK-NEXT: v_add_co_u32 v10, s4, v10, -1 ; CHECK-NEXT: v_add_co_ci_u32_e64 v11, null, -1, v11, s4 -; CHECK-NEXT: v_cmp_eq_u64_e64 s4, 0, v[8:9] +; CHECK-NEXT: v_cmp_eq_u64_e64 s4, 0, v[6:7] ; CHECK-NEXT: s_or_b32 s8, s4, s8 ; CHECK-NEXT: s_waitcnt vmcnt(0) ; CHECK-NEXT: global_store_byte v[4:5], v12, off @@ -1227,19 +1227,19 @@ define void @memmove_p1_p4(ptr addrspace(1) align 1 %dst, ptr addrspace(4) align ; CHECK-NEXT: .p2align 6 ; CHECK-NEXT: .LBB8_15: ; %memmove_bwd_main_loop ; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: v_add_co_u32 v4, vcc_lo, v2, v6 -; CHECK-NEXT: v_add_co_ci_u32_e64 v5, null, v3, v7, vcc_lo -; CHECK-NEXT: v_add_co_u32 v12, s4, v0, v6 
-; CHECK-NEXT: v_add_co_ci_u32_e64 v13, null, v1, v7, s4 -; CHECK-NEXT: global_load_dwordx4 v[8:11], v[4:5], off -; CHECK-NEXT: v_add_co_u32 v4, vcc_lo, v6, -16 -; CHECK-NEXT: v_add_co_ci_u32_e64 v5, null, -1, v7, vcc_lo -; CHECK-NEXT: v_cmp_eq_u64_e32 vcc_lo, 0, v[4:5] -; CHECK-NEXT: v_mov_b32_e32 v7, v5 -; CHECK-NEXT: v_mov_b32_e32 v6, v4 +; CHECK-NEXT: v_mov_b32_e32 v11, v9 +; CHECK-NEXT: v_mov_b32_e32 v10, v8 +; CHECK-NEXT: v_add_co_u32 v4, vcc_lo, v2, v10 +; CHECK-NEXT: v_add_co_ci_u32_e64 v5, null, v3, v11, vcc_lo +; CHECK-NEXT: v_add_co_u32 v8, vcc_lo, v10, -16 +; CHECK-NEXT: v_add_co_ci_u32_e64 v9, null, -1, v11, vcc_lo +; CHECK-NEXT: global_load_dwordx4 v[4:7], v[4:5], off +; CHECK-NEXT: v_add_co_u32 v10, s4, v0, v10 +; CHECK-NEXT: v_cmp_eq_u64_e32 vcc_lo, 0, v[8:9] +; CHECK-NEXT: v_add_co_ci_u32_e64 v11, null, v1, v11, s4 ; CHECK-NEXT: s_or_b32 s7, vcc_lo, s7 ; CHECK-NEXT: s_waitcnt vmcnt(0) -; CHECK-NEXT: global_store_dwordx4 v[12:13], v[8:11], off +; CHECK-NEXT: global_store_dwordx4 v[10:11], v[4:7], off ; CHECK-NEXT: s_andn2_b32 exec_lo, exec_lo, s7 ; CHECK-NEXT: s_cbranch_execnz .LBB8_15 ; CHECK-NEXT: .LBB8_16: ; %Flow35 diff --git a/llvm/test/CodeGen/AMDGPU/mul_int24.ll b/llvm/test/CodeGen/AMDGPU/mul_int24.ll index 3d9c2a29cb9c1..10d4eb029ee35 100644 --- a/llvm/test/CodeGen/AMDGPU/mul_int24.ll +++ b/llvm/test/CodeGen/AMDGPU/mul_int24.ll @@ -10,46 +10,43 @@ define amdgpu_kernel void @test_smul24_i32(ptr addrspace(1) %out, i32 %a, i32 %b ; SI-LABEL: test_smul24_i32: ; SI: ; %bb.0: ; %entry ; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9 -; SI-NEXT: s_mov_b32 s7, 0xf000 ; SI-NEXT: s_waitcnt lgkmcnt(0) -; SI-NEXT: s_bfe_i32 s2, s2, 0x180000 -; SI-NEXT: s_bfe_i32 s3, s3, 0x180000 -; SI-NEXT: s_mul_i32 s2, s2, s3 -; SI-NEXT: s_mov_b32 s6, -1 -; SI-NEXT: s_mov_b32 s4, s0 -; SI-NEXT: s_mov_b32 s5, s1 -; SI-NEXT: v_mov_b32_e32 v0, s2 -; SI-NEXT: buffer_store_dword v0, off, s[4:7], 0 +; SI-NEXT: s_mov_b64 s[4:5], s[2:3] +; SI-NEXT: s_mov_b32 s3, 0xf000 +; SI-NEXT: s_bfe_i32 s2, s4, 0x180000 +; SI-NEXT: s_bfe_i32 s4, s5, 0x180000 +; SI-NEXT: s_mul_i32 s4, s2, s4 +; SI-NEXT: s_mov_b32 s2, -1 +; SI-NEXT: v_mov_b32_e32 v0, s4 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0 ; SI-NEXT: s_endpgm ; ; VI-LABEL: test_smul24_i32: ; VI: ; %bb.0: ; %entry ; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24 -; VI-NEXT: s_mov_b32 s7, 0xf000 -; VI-NEXT: s_mov_b32 s6, -1 ; VI-NEXT: s_waitcnt lgkmcnt(0) -; VI-NEXT: s_mov_b32 s4, s0 -; VI-NEXT: s_mov_b32 s5, s1 -; VI-NEXT: s_bfe_i32 s0, s2, 0x180000 -; VI-NEXT: s_bfe_i32 s1, s3, 0x180000 -; VI-NEXT: s_mul_i32 s0, s0, s1 -; VI-NEXT: v_mov_b32_e32 v0, s0 -; VI-NEXT: buffer_store_dword v0, off, s[4:7], 0 +; VI-NEXT: s_mov_b64 s[4:5], s[2:3] +; VI-NEXT: s_bfe_i32 s4, s4, 0x180000 +; VI-NEXT: s_bfe_i32 s5, s5, 0x180000 +; VI-NEXT: s_mul_i32 s4, s4, s5 +; VI-NEXT: s_mov_b32 s3, 0xf000 +; VI-NEXT: s_mov_b32 s2, -1 +; VI-NEXT: v_mov_b32_e32 v0, s4 +; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0 ; VI-NEXT: s_endpgm ; ; GFX9-LABEL: test_smul24_i32: ; GFX9: ; %bb.0: ; %entry ; GFX9-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24 -; GFX9-NEXT: s_mov_b32 s7, 0xf000 -; GFX9-NEXT: s_mov_b32 s6, -1 ; GFX9-NEXT: s_waitcnt lgkmcnt(0) -; GFX9-NEXT: s_mov_b32 s4, s0 -; GFX9-NEXT: s_mov_b32 s5, s1 -; GFX9-NEXT: s_bfe_i32 s0, s2, 0x180000 -; GFX9-NEXT: s_bfe_i32 s1, s3, 0x180000 -; GFX9-NEXT: s_mul_i32 s0, s0, s1 -; GFX9-NEXT: v_mov_b32_e32 v0, s0 -; GFX9-NEXT: buffer_store_dword v0, off, s[4:7], 0 +; GFX9-NEXT: s_mov_b64 s[4:5], s[2:3] +; GFX9-NEXT: s_bfe_i32 s4, s4, 0x180000 +; 
GFX9-NEXT: s_bfe_i32 s5, s5, 0x180000 +; GFX9-NEXT: s_mul_i32 s4, s4, s5 +; GFX9-NEXT: s_mov_b32 s3, 0xf000 +; GFX9-NEXT: s_mov_b32 s2, -1 +; GFX9-NEXT: v_mov_b32_e32 v0, s4 +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], 0 ; GFX9-NEXT: s_endpgm ; ; EG-LABEL: test_smul24_i32: @@ -127,16 +124,15 @@ define amdgpu_kernel void @test_smulhi24_i64(ptr addrspace(1) %out, i32 %a, i32 ; GFX9-LABEL: test_smulhi24_i64: ; GFX9: ; %bb.0: ; %entry ; GFX9-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24 -; GFX9-NEXT: s_mov_b32 s7, 0xf000 -; GFX9-NEXT: s_mov_b32 s6, -1 ; GFX9-NEXT: s_waitcnt lgkmcnt(0) -; GFX9-NEXT: s_mov_b32 s4, s0 -; GFX9-NEXT: s_mov_b32 s5, s1 -; GFX9-NEXT: s_bfe_i32 s0, s2, 0x180000 -; GFX9-NEXT: s_bfe_i32 s1, s3, 0x180000 -; GFX9-NEXT: s_mul_hi_i32 s0, s0, s1 -; GFX9-NEXT: v_mov_b32_e32 v0, s0 -; GFX9-NEXT: buffer_store_dword v0, off, s[4:7], 0 +; GFX9-NEXT: s_mov_b64 s[4:5], s[2:3] +; GFX9-NEXT: s_bfe_i32 s4, s4, 0x180000 +; GFX9-NEXT: s_bfe_i32 s5, s5, 0x180000 +; GFX9-NEXT: s_mul_hi_i32 s4, s4, s5 +; GFX9-NEXT: s_mov_b32 s3, 0xf000 +; GFX9-NEXT: s_mov_b32 s2, -1 +; GFX9-NEXT: v_mov_b32_e32 v0, s4 +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], 0 ; GFX9-NEXT: s_endpgm ; ; EG-LABEL: test_smulhi24_i64: @@ -464,29 +460,26 @@ define amdgpu_kernel void @test_smul24_i33(ptr addrspace(1) %out, i33 %a, i33 %b ; SI-LABEL: test_smul24_i33: ; SI: ; %bb.0: ; %entry ; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9 -; SI-NEXT: s_load_dwordx2 s[8:9], s[4:5], 0xd -; SI-NEXT: s_mov_b32 s7, 0xf000 -; SI-NEXT: s_mov_b32 s6, -1 +; SI-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0xd ; SI-NEXT: s_waitcnt lgkmcnt(0) -; SI-NEXT: s_mov_b32 s4, s0 -; SI-NEXT: s_mov_b32 s5, s1 -; SI-NEXT: s_bfe_i32 s0, s8, 0x180000 -; SI-NEXT: s_bfe_i32 s1, s2, 0x180000 -; SI-NEXT: v_mov_b32_e32 v0, s0 -; SI-NEXT: s_mul_i32 s0, s1, s0 -; SI-NEXT: v_mul_hi_i32_i24_e32 v1, s1, v0 -; SI-NEXT: v_mov_b32_e32 v0, s0 +; SI-NEXT: s_mov_b64 s[6:7], s[2:3] +; SI-NEXT: s_mov_b32 s3, 0xf000 +; SI-NEXT: s_mov_b32 s2, -1 +; SI-NEXT: s_bfe_i32 s4, s4, 0x180000 +; SI-NEXT: s_bfe_i32 s5, s6, 0x180000 +; SI-NEXT: v_mov_b32_e32 v0, s4 +; SI-NEXT: s_mul_i32 s4, s5, s4 +; SI-NEXT: v_mul_hi_i32_i24_e32 v1, s5, v0 +; SI-NEXT: v_mov_b32_e32 v0, s4 ; SI-NEXT: v_lshl_b64 v[0:1], v[0:1], 31 ; SI-NEXT: v_ashr_i64 v[0:1], v[0:1], 31 -; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0 +; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0 ; SI-NEXT: s_endpgm ; ; VI-LABEL: test_smul24_i33: ; VI: ; %bb.0: ; %entry ; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24 ; VI-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x34 -; VI-NEXT: s_mov_b32 s7, 0xf000 -; VI-NEXT: s_mov_b32 s6, -1 ; VI-NEXT: s_waitcnt lgkmcnt(0) ; VI-NEXT: s_bfe_i32 s2, s2, 0x180000 ; VI-NEXT: s_bfe_i32 s3, s4, 0x180000 @@ -494,10 +487,10 @@ define amdgpu_kernel void @test_smul24_i33(ptr addrspace(1) %out, i33 %a, i33 %b ; VI-NEXT: v_mul_hi_i32_i24_e32 v1, s2, v0 ; VI-NEXT: v_mul_i32_i24_e32 v0, s2, v0 ; VI-NEXT: v_lshlrev_b64 v[0:1], 31, v[0:1] -; VI-NEXT: s_mov_b32 s4, s0 +; VI-NEXT: s_mov_b32 s3, 0xf000 ; VI-NEXT: v_ashrrev_i64 v[0:1], 31, v[0:1] -; VI-NEXT: s_mov_b32 s5, s1 -; VI-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0 +; VI-NEXT: s_mov_b32 s2, -1 +; VI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0 ; VI-NEXT: s_endpgm ; ; GFX9-LABEL: test_smul24_i33: @@ -577,31 +570,29 @@ define amdgpu_kernel void @test_smulhi24_i33(ptr addrspace(1) %out, i33 %a, i33 ; SI-LABEL: test_smulhi24_i33: ; SI: ; %bb.0: ; %entry ; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9 -; SI-NEXT: s_load_dwordx2 s[8:9], s[4:5], 
0xd -; SI-NEXT: s_mov_b32 s7, 0xf000 -; SI-NEXT: s_mov_b32 s6, -1 +; SI-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0xd ; SI-NEXT: s_waitcnt lgkmcnt(0) -; SI-NEXT: s_mov_b32 s4, s0 -; SI-NEXT: s_mov_b32 s5, s1 -; SI-NEXT: v_mov_b32_e32 v0, s8 -; SI-NEXT: v_mul_hi_i32_i24_e32 v0, s2, v0 +; SI-NEXT: s_mov_b64 s[6:7], s[2:3] +; SI-NEXT: s_mov_b32 s3, 0xf000 +; SI-NEXT: s_mov_b32 s2, -1 +; SI-NEXT: v_mov_b32_e32 v0, s4 +; SI-NEXT: v_mul_hi_i32_i24_e32 v0, s6, v0 ; SI-NEXT: v_and_b32_e32 v0, 1, v0 -; SI-NEXT: buffer_store_dword v0, off, s[4:7], 0 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0 ; SI-NEXT: s_endpgm ; ; VI-LABEL: test_smulhi24_i33: ; VI: ; %bb.0: ; %entry ; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24 -; VI-NEXT: s_load_dwordx2 s[8:9], s[4:5], 0x34 -; VI-NEXT: s_mov_b32 s7, 0xf000 -; VI-NEXT: s_mov_b32 s6, -1 +; VI-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x34 ; VI-NEXT: s_waitcnt lgkmcnt(0) -; VI-NEXT: s_mov_b32 s4, s0 -; VI-NEXT: v_mov_b32_e32 v0, s8 -; VI-NEXT: v_mul_hi_i32_i24_e32 v0, s2, v0 -; VI-NEXT: s_mov_b32 s5, s1 +; VI-NEXT: s_mov_b64 s[6:7], s[2:3] +; VI-NEXT: v_mov_b32_e32 v0, s4 +; VI-NEXT: v_mul_hi_i32_i24_e32 v0, s6, v0 +; VI-NEXT: s_mov_b32 s3, 0xf000 +; VI-NEXT: s_mov_b32 s2, -1 ; VI-NEXT: v_and_b32_e32 v0, 1, v0 -; VI-NEXT: buffer_store_dword v0, off, s[4:7], 0 +; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0 ; VI-NEXT: s_endpgm ; ; GFX9-LABEL: test_smulhi24_i33: diff --git a/llvm/test/CodeGen/AMDGPU/mul_uint24-amdgcn.ll b/llvm/test/CodeGen/AMDGPU/mul_uint24-amdgcn.ll index e29da3a6b000f..1165401a93af8 100644 --- a/llvm/test/CodeGen/AMDGPU/mul_uint24-amdgcn.ll +++ b/llvm/test/CodeGen/AMDGPU/mul_uint24-amdgcn.ll @@ -10,46 +10,43 @@ define amdgpu_kernel void @test_umul24_i32(ptr addrspace(1) %out, i32 %a, i32 %b ; SI-LABEL: test_umul24_i32: ; SI: ; %bb.0: ; %entry ; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9 -; SI-NEXT: s_mov_b32 s7, 0xf000 ; SI-NEXT: s_waitcnt lgkmcnt(0) -; SI-NEXT: s_and_b32 s2, s2, 0xffffff -; SI-NEXT: s_and_b32 s3, s3, 0xffffff -; SI-NEXT: s_mul_i32 s2, s2, s3 -; SI-NEXT: s_mov_b32 s6, -1 -; SI-NEXT: s_mov_b32 s4, s0 -; SI-NEXT: s_mov_b32 s5, s1 -; SI-NEXT: v_mov_b32_e32 v0, s2 -; SI-NEXT: buffer_store_dword v0, off, s[4:7], 0 +; SI-NEXT: s_mov_b64 s[4:5], s[2:3] +; SI-NEXT: s_mov_b32 s3, 0xf000 +; SI-NEXT: s_and_b32 s2, s4, 0xffffff +; SI-NEXT: s_and_b32 s4, s5, 0xffffff +; SI-NEXT: s_mul_i32 s4, s2, s4 +; SI-NEXT: s_mov_b32 s2, -1 +; SI-NEXT: v_mov_b32_e32 v0, s4 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0 ; SI-NEXT: s_endpgm ; ; VI-LABEL: test_umul24_i32: ; VI: ; %bb.0: ; %entry ; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24 -; VI-NEXT: s_mov_b32 s7, 0xf000 -; VI-NEXT: s_mov_b32 s6, -1 ; VI-NEXT: s_waitcnt lgkmcnt(0) -; VI-NEXT: s_mov_b32 s4, s0 -; VI-NEXT: s_mov_b32 s5, s1 -; VI-NEXT: s_and_b32 s0, s2, 0xffffff -; VI-NEXT: s_and_b32 s1, s3, 0xffffff -; VI-NEXT: s_mul_i32 s0, s0, s1 -; VI-NEXT: v_mov_b32_e32 v0, s0 -; VI-NEXT: buffer_store_dword v0, off, s[4:7], 0 +; VI-NEXT: s_mov_b64 s[4:5], s[2:3] +; VI-NEXT: s_and_b32 s4, s4, 0xffffff +; VI-NEXT: s_and_b32 s5, s5, 0xffffff +; VI-NEXT: s_mul_i32 s4, s4, s5 +; VI-NEXT: s_mov_b32 s3, 0xf000 +; VI-NEXT: s_mov_b32 s2, -1 +; VI-NEXT: v_mov_b32_e32 v0, s4 +; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0 ; VI-NEXT: s_endpgm ; ; GFX9-LABEL: test_umul24_i32: ; GFX9: ; %bb.0: ; %entry ; GFX9-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24 -; GFX9-NEXT: s_mov_b32 s7, 0xf000 -; GFX9-NEXT: s_mov_b32 s6, -1 ; GFX9-NEXT: s_waitcnt lgkmcnt(0) -; GFX9-NEXT: s_mov_b32 s4, s0 -; GFX9-NEXT: s_mov_b32 s5, s1 -; 
GFX9-NEXT: s_and_b32 s0, s2, 0xffffff -; GFX9-NEXT: s_and_b32 s1, s3, 0xffffff -; GFX9-NEXT: s_mul_i32 s0, s0, s1 -; GFX9-NEXT: v_mov_b32_e32 v0, s0 -; GFX9-NEXT: buffer_store_dword v0, off, s[4:7], 0 +; GFX9-NEXT: s_mov_b64 s[4:5], s[2:3] +; GFX9-NEXT: s_and_b32 s4, s4, 0xffffff +; GFX9-NEXT: s_and_b32 s5, s5, 0xffffff +; GFX9-NEXT: s_mul_i32 s4, s4, s5 +; GFX9-NEXT: s_mov_b32 s3, 0xf000 +; GFX9-NEXT: s_mov_b32 s2, -1 +; GFX9-NEXT: v_mov_b32_e32 v0, s4 +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], 0 ; GFX9-NEXT: s_endpgm entry: %0 = shl i32 %a, 8 @@ -406,16 +403,15 @@ define amdgpu_kernel void @test_umulhi24_i32_i64(ptr addrspace(1) %out, i32 %a, ; GFX9-LABEL: test_umulhi24_i32_i64: ; GFX9: ; %bb.0: ; %entry ; GFX9-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24 -; GFX9-NEXT: s_mov_b32 s7, 0xf000 -; GFX9-NEXT: s_mov_b32 s6, -1 ; GFX9-NEXT: s_waitcnt lgkmcnt(0) -; GFX9-NEXT: s_mov_b32 s4, s0 -; GFX9-NEXT: s_mov_b32 s5, s1 -; GFX9-NEXT: s_and_b32 s0, s2, 0xffffff -; GFX9-NEXT: s_and_b32 s1, s3, 0xffffff -; GFX9-NEXT: s_mul_hi_u32 s0, s0, s1 -; GFX9-NEXT: v_mov_b32_e32 v0, s0 -; GFX9-NEXT: buffer_store_dword v0, off, s[4:7], 0 +; GFX9-NEXT: s_mov_b64 s[4:5], s[2:3] +; GFX9-NEXT: s_and_b32 s4, s4, 0xffffff +; GFX9-NEXT: s_and_b32 s5, s5, 0xffffff +; GFX9-NEXT: s_mul_hi_u32 s4, s4, s5 +; GFX9-NEXT: s_mov_b32 s3, 0xf000 +; GFX9-NEXT: s_mov_b32 s2, -1 +; GFX9-NEXT: v_mov_b32_e32 v0, s4 +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], 0 ; GFX9-NEXT: s_endpgm entry: %a.24 = and i32 %a, 16777215 @@ -632,33 +628,31 @@ define amdgpu_kernel void @test_umulhi16_i32(ptr addrspace(1) %out, i32 %a, i32 ; SI-LABEL: test_umulhi16_i32: ; SI: ; %bb.0: ; %entry ; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9 -; SI-NEXT: s_mov_b32 s7, 0xf000 ; SI-NEXT: s_waitcnt lgkmcnt(0) -; SI-NEXT: s_and_b32 s2, s2, 0xffff -; SI-NEXT: s_and_b32 s3, s3, 0xffff -; SI-NEXT: s_mul_i32 s2, s2, s3 -; SI-NEXT: s_lshr_b32 s2, s2, 16 -; SI-NEXT: s_mov_b32 s6, -1 -; SI-NEXT: s_mov_b32 s4, s0 -; SI-NEXT: s_mov_b32 s5, s1 -; SI-NEXT: v_mov_b32_e32 v0, s2 -; SI-NEXT: buffer_store_short v0, off, s[4:7], 0 +; SI-NEXT: s_mov_b64 s[4:5], s[2:3] +; SI-NEXT: s_mov_b32 s3, 0xf000 +; SI-NEXT: s_and_b32 s2, s4, 0xffff +; SI-NEXT: s_and_b32 s4, s5, 0xffff +; SI-NEXT: s_mul_i32 s2, s2, s4 +; SI-NEXT: s_lshr_b32 s4, s2, 16 +; SI-NEXT: s_mov_b32 s2, -1 +; SI-NEXT: v_mov_b32_e32 v0, s4 +; SI-NEXT: buffer_store_short v0, off, s[0:3], 0 ; SI-NEXT: s_endpgm ; ; VI-LABEL: test_umulhi16_i32: ; VI: ; %bb.0: ; %entry ; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24 -; VI-NEXT: s_mov_b32 s7, 0xf000 -; VI-NEXT: s_mov_b32 s6, -1 ; VI-NEXT: s_waitcnt lgkmcnt(0) -; VI-NEXT: s_mov_b32 s4, s0 -; VI-NEXT: s_mov_b32 s5, s1 -; VI-NEXT: s_and_b32 s0, s2, 0xffff -; VI-NEXT: s_and_b32 s1, s3, 0xffff -; VI-NEXT: s_mul_i32 s0, s0, s1 -; VI-NEXT: s_lshr_b32 s0, s0, 16 -; VI-NEXT: v_mov_b32_e32 v0, s0 -; VI-NEXT: buffer_store_short v0, off, s[4:7], 0 +; VI-NEXT: s_mov_b64 s[4:5], s[2:3] +; VI-NEXT: s_and_b32 s4, s4, 0xffff +; VI-NEXT: s_and_b32 s5, s5, 0xffff +; VI-NEXT: s_mul_i32 s4, s4, s5 +; VI-NEXT: s_lshr_b32 s4, s4, 16 +; VI-NEXT: s_mov_b32 s3, 0xf000 +; VI-NEXT: s_mov_b32 s2, -1 +; VI-NEXT: v_mov_b32_e32 v0, s4 +; VI-NEXT: buffer_store_short v0, off, s[0:3], 0 ; VI-NEXT: s_endpgm ; ; GFX9-LABEL: test_umulhi16_i32: diff --git a/llvm/test/CodeGen/AMDGPU/nofpclass-call.ll b/llvm/test/CodeGen/AMDGPU/nofpclass-call.ll new file mode 100644 index 0000000000000..5eb9c9f4ed3ae --- /dev/null +++ b/llvm/test/CodeGen/AMDGPU/nofpclass-call.ll @@ -0,0 +1,191 @@ +; NOTE: Assertions have 
been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6 +; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 < %s | FileCheck %s + +; Check that nofpclass attributes on call returns are used in +; selectiondag. + +define internal float @func_f32(ptr addrspace(1) %ptr) { +; CHECK-LABEL: func_f32: +; CHECK: ; %bb.0: +; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; CHECK-NEXT: global_load_dword v0, v[0:1], off glc +; CHECK-NEXT: s_waitcnt vmcnt(0) +; CHECK-NEXT: s_setpc_b64 s[30:31] + %ld = load volatile float, ptr addrspace(1) %ptr + ret float %ld +} + +define float @call_nofpclass_funcs_f32(ptr addrspace(1) %ptr) { +; CHECK-LABEL: call_nofpclass_funcs_f32: +; CHECK: ; %bb.0: +; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; CHECK-NEXT: s_mov_b32 s18, s33 +; CHECK-NEXT: s_mov_b32 s33, s32 +; CHECK-NEXT: s_xor_saveexec_b64 s[16:17], -1 +; CHECK-NEXT: buffer_store_dword v4, off, s[0:3], s33 ; 4-byte Folded Spill +; CHECK-NEXT: s_mov_b64 exec, s[16:17] +; CHECK-NEXT: v_writelane_b32 v4, s30, 0 +; CHECK-NEXT: s_addk_i32 s32, 0x400 +; CHECK-NEXT: v_writelane_b32 v4, s31, 1 +; CHECK-NEXT: s_getpc_b64 s[16:17] +; CHECK-NEXT: s_add_u32 s16, s16, func_f32@rel32@lo+4 +; CHECK-NEXT: s_addc_u32 s17, s17, func_f32@rel32@hi+12 +; CHECK-NEXT: v_mov_b32_e32 v2, v0 +; CHECK-NEXT: s_swappc_b64 s[30:31], s[16:17] +; CHECK-NEXT: v_mov_b32_e32 v3, v0 +; CHECK-NEXT: v_mov_b32_e32 v0, v2 +; CHECK-NEXT: s_swappc_b64 s[30:31], s[16:17] +; CHECK-NEXT: v_readlane_b32 s30, v4, 0 +; CHECK-NEXT: v_min_f32_e32 v0, v3, v0 +; CHECK-NEXT: v_readlane_b32 s31, v4, 1 +; CHECK-NEXT: s_mov_b32 s32, s33 +; CHECK-NEXT: s_xor_saveexec_b64 s[4:5], -1 +; CHECK-NEXT: buffer_load_dword v4, off, s[0:3], s33 ; 4-byte Folded Reload +; CHECK-NEXT: s_mov_b64 exec, s[4:5] +; CHECK-NEXT: s_mov_b32 s33, s18 +; CHECK-NEXT: s_waitcnt vmcnt(0) +; CHECK-NEXT: s_setpc_b64 s[30:31] + %call0 = call nofpclass(nan) float @func_f32(ptr addrspace(1) %ptr) + %call1 = call nofpclass(nan) float @func_f32(ptr addrspace(1) %ptr) + %min = call float @llvm.minnum.f32(float %call0, float %call1) + ret float %min +} + +define internal <2 x float> @func_v2f32(ptr addrspace(1) %ptr) { +; CHECK-LABEL: func_v2f32: +; CHECK: ; %bb.0: +; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; CHECK-NEXT: global_load_dwordx2 v[0:1], v[0:1], off glc +; CHECK-NEXT: s_waitcnt vmcnt(0) +; CHECK-NEXT: s_setpc_b64 s[30:31] + %ld = load volatile <2 x float>, ptr addrspace(1) %ptr + ret <2 x float> %ld +} + +define <2 x float> @call_nofpclass_funcs_v2f32(ptr addrspace(1) %ptr) { +; CHECK-LABEL: call_nofpclass_funcs_v2f32: +; CHECK: ; %bb.0: +; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; CHECK-NEXT: s_mov_b32 s18, s33 +; CHECK-NEXT: s_mov_b32 s33, s32 +; CHECK-NEXT: s_xor_saveexec_b64 s[16:17], -1 +; CHECK-NEXT: buffer_store_dword v6, off, s[0:3], s33 ; 4-byte Folded Spill +; CHECK-NEXT: s_mov_b64 exec, s[16:17] +; CHECK-NEXT: v_writelane_b32 v6, s30, 0 +; CHECK-NEXT: s_addk_i32 s32, 0x400 +; CHECK-NEXT: v_writelane_b32 v6, s31, 1 +; CHECK-NEXT: s_getpc_b64 s[16:17] +; CHECK-NEXT: s_add_u32 s16, s16, func_v2f32@rel32@lo+4 +; CHECK-NEXT: s_addc_u32 s17, s17, func_v2f32@rel32@hi+12 +; CHECK-NEXT: v_mov_b32_e32 v2, v1 +; CHECK-NEXT: v_mov_b32_e32 v3, v0 +; CHECK-NEXT: s_swappc_b64 s[30:31], s[16:17] +; CHECK-NEXT: v_mov_b32_e32 v4, v0 +; CHECK-NEXT: v_mov_b32_e32 v5, v1 +; CHECK-NEXT: v_mov_b32_e32 v0, v3 +; CHECK-NEXT: v_mov_b32_e32 v1, v2 +; CHECK-NEXT: s_swappc_b64 s[30:31], s[16:17] +; CHECK-NEXT: v_readlane_b32 s30, v6, 0 +; 
CHECK-NEXT: v_min_f32_e32 v0, v4, v0 +; CHECK-NEXT: v_min_f32_e32 v1, v5, v1 +; CHECK-NEXT: v_readlane_b32 s31, v6, 1 +; CHECK-NEXT: s_mov_b32 s32, s33 +; CHECK-NEXT: s_xor_saveexec_b64 s[4:5], -1 +; CHECK-NEXT: buffer_load_dword v6, off, s[0:3], s33 ; 4-byte Folded Reload +; CHECK-NEXT: s_mov_b64 exec, s[4:5] +; CHECK-NEXT: s_mov_b32 s33, s18 +; CHECK-NEXT: s_waitcnt vmcnt(0) +; CHECK-NEXT: s_setpc_b64 s[30:31] + %call0 = call nofpclass(nan) <2 x float> @func_v2f32(ptr addrspace(1) %ptr) + %call1 = call nofpclass(nan) <2 x float> @func_v2f32(ptr addrspace(1) %ptr) + %min = call <2 x float> @llvm.minnum.v2f32(<2 x float> %call0, <2 x float> %call1) + ret <2 x float> %min +} + +define internal double @func_f64(ptr addrspace(1) %ptr) { +; CHECK-LABEL: func_f64: +; CHECK: ; %bb.0: +; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; CHECK-NEXT: global_load_dwordx2 v[0:1], v[0:1], off glc +; CHECK-NEXT: s_waitcnt vmcnt(0) +; CHECK-NEXT: s_setpc_b64 s[30:31] + %ld = load volatile double, ptr addrspace(1) %ptr + ret double %ld +} + +define double @call_nofpclass_funcs_f64(ptr addrspace(1) %ptr) { +; CHECK-LABEL: call_nofpclass_funcs_f64: +; CHECK: ; %bb.0: +; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; CHECK-NEXT: s_mov_b32 s18, s33 +; CHECK-NEXT: s_mov_b32 s33, s32 +; CHECK-NEXT: s_xor_saveexec_b64 s[16:17], -1 +; CHECK-NEXT: buffer_store_dword v6, off, s[0:3], s33 ; 4-byte Folded Spill +; CHECK-NEXT: s_mov_b64 exec, s[16:17] +; CHECK-NEXT: v_writelane_b32 v6, s30, 0 +; CHECK-NEXT: s_addk_i32 s32, 0x400 +; CHECK-NEXT: v_writelane_b32 v6, s31, 1 +; CHECK-NEXT: s_getpc_b64 s[16:17] +; CHECK-NEXT: s_add_u32 s16, s16, func_f64@rel32@lo+4 +; CHECK-NEXT: s_addc_u32 s17, s17, func_f64@rel32@hi+12 +; CHECK-NEXT: v_mov_b32_e32 v4, v1 +; CHECK-NEXT: v_mov_b32_e32 v5, v0 +; CHECK-NEXT: s_swappc_b64 s[30:31], s[16:17] +; CHECK-NEXT: v_mov_b32_e32 v2, v0 +; CHECK-NEXT: v_mov_b32_e32 v3, v1 +; CHECK-NEXT: v_mov_b32_e32 v0, v5 +; CHECK-NEXT: v_mov_b32_e32 v1, v4 +; CHECK-NEXT: s_swappc_b64 s[30:31], s[16:17] +; CHECK-NEXT: v_readlane_b32 s30, v6, 0 +; CHECK-NEXT: v_min_f64 v[0:1], v[2:3], v[0:1] +; CHECK-NEXT: v_readlane_b32 s31, v6, 1 +; CHECK-NEXT: s_mov_b32 s32, s33 +; CHECK-NEXT: s_xor_saveexec_b64 s[4:5], -1 +; CHECK-NEXT: buffer_load_dword v6, off, s[0:3], s33 ; 4-byte Folded Reload +; CHECK-NEXT: s_mov_b64 exec, s[4:5] +; CHECK-NEXT: s_mov_b32 s33, s18 +; CHECK-NEXT: s_waitcnt vmcnt(0) +; CHECK-NEXT: s_setpc_b64 s[30:31] + %call0 = call nofpclass(nan) double @func_f64(ptr addrspace(1) %ptr) + %call1 = call nofpclass(nan) double @func_f64(ptr addrspace(1) %ptr) + %min = call double @llvm.minnum.f64(double %call0, double %call1) + ret double %min +} + +define float @call_nofpclass_intrinsic_f32(float %x, float %y, float %z) { +; CHECK-LABEL: call_nofpclass_intrinsic_f32: +; CHECK: ; %bb.0: +; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; CHECK-NEXT: v_sqrt_f32_e32 v0, v0 +; CHECK-NEXT: v_sqrt_f32_e32 v1, v1 +; CHECK-NEXT: v_cmp_lt_f32_e32 vcc, v0, v1 +; CHECK-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc +; CHECK-NEXT: s_setpc_b64 s[30:31] + %call0 = call nofpclass(nan) float @llvm.amdgcn.sqrt.f32(float %x) + %call1 = call nofpclass(nan) float @llvm.amdgcn.sqrt.f32(float %y) + %lt = fcmp olt float %call0, %call1 + %min = select nsz i1 %lt, float %call0, float %call1 + ret float %min +} + +define <2 x half> @call_nofpclass_intrinsic_v2f16(float %x, float %y, float %z, float %w) { +; CHECK-LABEL: call_nofpclass_intrinsic_v2f16: +; CHECK: ; %bb.0: +; CHECK-NEXT: s_waitcnt vmcnt(0) 
expcnt(0) lgkmcnt(0) +; CHECK-NEXT: v_cvt_pkrtz_f16_f32 v0, v0, v1 +; CHECK-NEXT: v_cvt_pkrtz_f16_f32 v1, v2, v3 +; CHECK-NEXT: v_lshrrev_b32_e32 v2, 16, v1 +; CHECK-NEXT: v_lshrrev_b32_e32 v3, 16, v0 +; CHECK-NEXT: v_cmp_lt_f16_e32 vcc, v0, v1 +; CHECK-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc +; CHECK-NEXT: v_cmp_lt_f16_e32 vcc, v3, v2 +; CHECK-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc +; CHECK-NEXT: s_mov_b32 s4, 0x5040100 +; CHECK-NEXT: v_perm_b32 v0, v1, v0, s4 +; CHECK-NEXT: s_setpc_b64 s[30:31] + %call0 = call nofpclass(nan) <2 x half> @llvm.amdgcn.cvt.pkrtz(float %x, float %y) + %call1 = call nofpclass(nan) <2 x half> @llvm.amdgcn.cvt.pkrtz(float %z, float %w) + %lt = fcmp olt <2 x half> %call0, %call1 + %min = select nsz <2 x i1> %lt, <2 x half> %call0, <2 x half> %call1 + ret <2 x half> %min +} diff --git a/llvm/test/CodeGen/AMDGPU/or.ll b/llvm/test/CodeGen/AMDGPU/or.ll index 728067edcf399..9afaab5ebcfb6 100644 --- a/llvm/test/CodeGen/AMDGPU/or.ll +++ b/llvm/test/CodeGen/AMDGPU/or.ll @@ -136,27 +136,25 @@ define amdgpu_kernel void @scalar_or_i32(ptr addrspace(1) %out, i32 %a, i32 %b) ; GFX6-LABEL: scalar_or_i32: ; GFX6: ; %bb.0: ; GFX6-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9 -; GFX6-NEXT: s_mov_b32 s7, 0xf000 -; GFX6-NEXT: s_mov_b32 s6, -1 ; GFX6-NEXT: s_waitcnt lgkmcnt(0) -; GFX6-NEXT: s_mov_b32 s4, s0 -; GFX6-NEXT: s_or_b32 s0, s2, s3 -; GFX6-NEXT: s_mov_b32 s5, s1 -; GFX6-NEXT: v_mov_b32_e32 v0, s0 -; GFX6-NEXT: buffer_store_dword v0, off, s[4:7], 0 +; GFX6-NEXT: s_mov_b64 s[4:5], s[2:3] +; GFX6-NEXT: s_or_b32 s4, s4, s5 +; GFX6-NEXT: s_mov_b32 s3, 0xf000 +; GFX6-NEXT: s_mov_b32 s2, -1 +; GFX6-NEXT: v_mov_b32_e32 v0, s4 +; GFX6-NEXT: buffer_store_dword v0, off, s[0:3], 0 ; GFX6-NEXT: s_endpgm ; ; GFX8-LABEL: scalar_or_i32: ; GFX8: ; %bb.0: ; GFX8-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24 -; GFX8-NEXT: s_mov_b32 s7, 0xf000 -; GFX8-NEXT: s_mov_b32 s6, -1 ; GFX8-NEXT: s_waitcnt lgkmcnt(0) -; GFX8-NEXT: s_mov_b32 s4, s0 -; GFX8-NEXT: s_or_b32 s0, s2, s3 -; GFX8-NEXT: s_mov_b32 s5, s1 -; GFX8-NEXT: v_mov_b32_e32 v0, s0 -; GFX8-NEXT: buffer_store_dword v0, off, s[4:7], 0 +; GFX8-NEXT: s_mov_b64 s[4:5], s[2:3] +; GFX8-NEXT: s_or_b32 s4, s4, s5 +; GFX8-NEXT: s_mov_b32 s3, 0xf000 +; GFX8-NEXT: s_mov_b32 s2, -1 +; GFX8-NEXT: v_mov_b32_e32 v0, s4 +; GFX8-NEXT: buffer_store_dword v0, off, s[0:3], 0 ; GFX8-NEXT: s_endpgm %or = or i32 %a, %b store i32 %or, ptr addrspace(1) %out diff --git a/llvm/test/CodeGen/AMDGPU/reg-coalescer-subreg-liveness.mir b/llvm/test/CodeGen/AMDGPU/reg-coalescer-subreg-liveness.mir index 381cb8c9d1047..f098618018839 100644 --- a/llvm/test/CodeGen/AMDGPU/reg-coalescer-subreg-liveness.mir +++ b/llvm/test/CodeGen/AMDGPU/reg-coalescer-subreg-liveness.mir @@ -16,11 +16,11 @@ body: | ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr4_sgpr5 ; CHECK-NEXT: undef [[S_LOAD_DWORD_IMM:%[0-9]+]].sub1:sgpr_128 = S_LOAD_DWORD_IMM [[COPY]], 0, 0 :: (dereferenceable invariant load (s32), align 16, addrspace 4) ; CHECK-NEXT: undef [[S_MOV_B32_:%[0-9]+]].sub0:sgpr_128 = S_MOV_B32 1 + ; CHECK-NEXT: [[S_LOAD_DWORD_IMM:%[0-9]+]].sub2:sgpr_128 = S_MOV_B32 0 ; CHECK-NEXT: undef [[S_MOV_B32_1:%[0-9]+]].sub0:sgpr_256 = S_MOV_B32 0 ; CHECK-NEXT: TENSOR_LOAD_TO_LDS_D2 [[S_MOV_B32_]], [[S_MOV_B32_1]], 0, 0, implicit-def dead $tensorcnt, implicit $exec, implicit $tensorcnt ; CHECK-NEXT: [[S_LOAD_DWORD_IMM:%[0-9]+]].sub0:sgpr_128 = S_MOV_B32 1 ; CHECK-NEXT: [[S_MOV_B32_:%[0-9]+]].sub1:sgpr_128 = COPY [[S_MOV_B32_]].sub0 - ; CHECK-NEXT: [[S_LOAD_DWORD_IMM:%[0-9]+]].sub2:sgpr_128 = S_MOV_B32 0 ; 
CHECK-NEXT: {{ $}} ; CHECK-NEXT: bb.1: ; CHECK-NEXT: successors: %bb.1(0x40000000), %bb.2(0x40000000) diff --git a/llvm/test/CodeGen/AMDGPU/set-inactive-wwm-overwrite.ll b/llvm/test/CodeGen/AMDGPU/set-inactive-wwm-overwrite.ll index 5c90957edd9f5..bcece19ae5fdd 100644 --- a/llvm/test/CodeGen/AMDGPU/set-inactive-wwm-overwrite.ll +++ b/llvm/test/CodeGen/AMDGPU/set-inactive-wwm-overwrite.ll @@ -16,11 +16,11 @@ define amdgpu_cs void @if_then(ptr addrspace(8) inreg %input, ptr addrspace(8) i ; GCN-NEXT: s_cbranch_execz .LBB0_4 ; GCN-NEXT: ; %bb.3: ; %.then ; GCN-NEXT: s_or_saveexec_b32 s1, -1 -; GCN-NEXT: v_cndmask_b32_e64 v1, 0, v3, s1 -; GCN-NEXT: v_mov_b32_e32 v2, 0 -; GCN-NEXT: v_mov_b32_dpp v2, v1 row_shr:1 row_mask:0xf bank_mask:0xf +; GCN-NEXT: v_mov_b32_e32 v1, 0 +; GCN-NEXT: v_cndmask_b32_e64 v2, 0, v3, s1 +; GCN-NEXT: v_mov_b32_dpp v1, v2 row_shr:1 row_mask:0xf bank_mask:0xf ; GCN-NEXT: s_mov_b32 exec_lo, s1 -; GCN-NEXT: v_mov_b32_e32 v0, v2 +; GCN-NEXT: v_mov_b32_e32 v0, v1 ; GCN-NEXT: v_mov_b32_e32 v4, -1 ; GCN-NEXT: v_mov_b32_e32 v3, 0 ; GCN-NEXT: buffer_store_dword v4, v0, s[4:7], 0 offen diff --git a/llvm/test/CodeGen/AMDGPU/sext-divergence-driven-isel.ll b/llvm/test/CodeGen/AMDGPU/sext-divergence-driven-isel.ll index a0bac532454f5..e589a6341ea0e 100644 --- a/llvm/test/CodeGen/AMDGPU/sext-divergence-driven-isel.ll +++ b/llvm/test/CodeGen/AMDGPU/sext-divergence-driven-isel.ll @@ -5,15 +5,14 @@ define amdgpu_kernel void @sext_i16_to_i32_uniform(ptr addrspace(1) %out, i16 %a ; GCN-LABEL: sext_i16_to_i32_uniform: ; GCN: ; %bb.0: ; GCN-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9 -; GCN-NEXT: s_mov_b32 s7, 0xf000 -; GCN-NEXT: s_mov_b32 s6, -1 ; GCN-NEXT: s_waitcnt lgkmcnt(0) -; GCN-NEXT: s_mov_b32 s4, s0 -; GCN-NEXT: s_sext_i32_i16 s0, s2 -; GCN-NEXT: s_add_i32 s0, s3, s0 -; GCN-NEXT: s_mov_b32 s5, s1 -; GCN-NEXT: v_mov_b32_e32 v0, s0 -; GCN-NEXT: buffer_store_dword v0, off, s[4:7], 0 +; GCN-NEXT: s_mov_b64 s[4:5], s[2:3] +; GCN-NEXT: s_sext_i32_i16 s4, s4 +; GCN-NEXT: s_add_i32 s4, s5, s4 +; GCN-NEXT: s_mov_b32 s3, 0xf000 +; GCN-NEXT: s_mov_b32 s2, -1 +; GCN-NEXT: v_mov_b32_e32 v0, s4 +; GCN-NEXT: buffer_store_dword v0, off, s[0:3], 0 ; GCN-NEXT: s_endpgm %sext = sext i16 %a to i32 %res = add i32 %b, %sext diff --git a/llvm/test/CodeGen/AMDGPU/shl.v2i16.ll b/llvm/test/CodeGen/AMDGPU/shl.v2i16.ll index d8511c8f6be11..17db3799b0de5 100644 --- a/llvm/test/CodeGen/AMDGPU/shl.v2i16.ll +++ b/llvm/test/CodeGen/AMDGPU/shl.v2i16.ll @@ -22,63 +22,57 @@ define amdgpu_kernel void @s_shl_v2i16(ptr addrspace(1) %out, <2 x i16> %lhs, <2 ; VI-LABEL: s_shl_v2i16: ; VI: ; %bb.0: ; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24 -; VI-NEXT: s_mov_b32 s7, 0xf000 -; VI-NEXT: s_mov_b32 s6, -1 ; VI-NEXT: s_waitcnt lgkmcnt(0) -; VI-NEXT: s_mov_b32 s4, s0 -; VI-NEXT: s_mov_b32 s5, s1 -; VI-NEXT: s_lshr_b32 s0, s3, 16 -; VI-NEXT: s_lshr_b32 s1, s2, 16 -; VI-NEXT: s_lshl_b32 s0, s1, s0 -; VI-NEXT: s_lshl_b32 s1, s2, s3 -; VI-NEXT: s_lshl_b32 s0, s0, 16 -; VI-NEXT: s_and_b32 s1, s1, 0xffff -; VI-NEXT: s_or_b32 s0, s1, s0 -; VI-NEXT: v_mov_b32_e32 v0, s0 -; VI-NEXT: buffer_store_dword v0, off, s[4:7], 0 +; VI-NEXT: s_mov_b64 s[4:5], s[2:3] +; VI-NEXT: s_lshr_b32 s6, s5, 16 +; VI-NEXT: s_lshr_b32 s7, s4, 16 +; VI-NEXT: s_lshl_b32 s4, s4, s5 +; VI-NEXT: s_lshl_b32 s5, s7, s6 +; VI-NEXT: s_lshl_b32 s5, s5, 16 +; VI-NEXT: s_and_b32 s4, s4, 0xffff +; VI-NEXT: s_or_b32 s4, s4, s5 +; VI-NEXT: s_mov_b32 s3, 0xf000 +; VI-NEXT: s_mov_b32 s2, -1 +; VI-NEXT: v_mov_b32_e32 v0, s4 +; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0 ; 
VI-NEXT: s_endpgm ; ; CI-LABEL: s_shl_v2i16: ; CI: ; %bb.0: ; CI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9 -; CI-NEXT: s_mov_b32 s7, 0xf000 -; CI-NEXT: s_mov_b32 s6, -1 ; CI-NEXT: s_waitcnt lgkmcnt(0) -; CI-NEXT: s_mov_b32 s4, s0 -; CI-NEXT: s_mov_b32 s5, s1 -; CI-NEXT: s_lshr_b32 s0, s2, 16 -; CI-NEXT: s_lshr_b32 s1, s3, 16 -; CI-NEXT: s_lshl_b32 s0, s0, s1 -; CI-NEXT: s_lshl_b32 s1, s2, s3 -; CI-NEXT: s_lshl_b32 s0, s0, 16 -; CI-NEXT: s_and_b32 s1, s1, 0xffff -; CI-NEXT: s_or_b32 s0, s1, s0 -; CI-NEXT: v_mov_b32_e32 v0, s0 -; CI-NEXT: buffer_store_dword v0, off, s[4:7], 0 +; CI-NEXT: s_mov_b64 s[4:5], s[2:3] +; CI-NEXT: s_lshr_b32 s6, s4, 16 +; CI-NEXT: s_lshr_b32 s7, s5, 16 +; CI-NEXT: s_lshl_b32 s4, s4, s5 +; CI-NEXT: s_lshl_b32 s5, s6, s7 +; CI-NEXT: s_lshl_b32 s5, s5, 16 +; CI-NEXT: s_and_b32 s4, s4, 0xffff +; CI-NEXT: s_or_b32 s4, s4, s5 +; CI-NEXT: s_mov_b32 s3, 0xf000 +; CI-NEXT: s_mov_b32 s2, -1 +; CI-NEXT: v_mov_b32_e32 v0, s4 +; CI-NEXT: buffer_store_dword v0, off, s[0:3], 0 ; CI-NEXT: s_endpgm ; ; GFX10-LABEL: s_shl_v2i16: ; GFX10: ; %bb.0: ; GFX10-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24 -; GFX10-NEXT: s_mov_b32 s7, 0x31016000 -; GFX10-NEXT: s_mov_b32 s6, -1 ; GFX10-NEXT: s_waitcnt lgkmcnt(0) ; GFX10-NEXT: v_pk_lshlrev_b16 v0, s3, s2 -; GFX10-NEXT: s_mov_b32 s4, s0 -; GFX10-NEXT: s_mov_b32 s5, s1 -; GFX10-NEXT: buffer_store_dword v0, off, s[4:7], 0 +; GFX10-NEXT: s_mov_b32 s3, 0x31016000 +; GFX10-NEXT: s_mov_b32 s2, -1 +; GFX10-NEXT: buffer_store_dword v0, off, s[0:3], 0 ; GFX10-NEXT: s_endpgm ; ; GFX11-LABEL: s_shl_v2i16: ; GFX11: ; %bb.0: ; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 -; GFX11-NEXT: s_mov_b32 s7, 0x31016000 -; GFX11-NEXT: s_mov_b32 s6, -1 ; GFX11-NEXT: s_waitcnt lgkmcnt(0) ; GFX11-NEXT: v_pk_lshlrev_b16 v0, s3, s2 -; GFX11-NEXT: s_mov_b32 s4, s0 -; GFX11-NEXT: s_mov_b32 s5, s1 -; GFX11-NEXT: buffer_store_b32 v0, off, s[4:7], 0 +; GFX11-NEXT: s_mov_b32 s3, 0x31016000 +; GFX11-NEXT: s_mov_b32 s2, -1 +; GFX11-NEXT: buffer_store_b32 v0, off, s[0:3], 0 ; GFX11-NEXT: s_endpgm %result = shl <2 x i16> %lhs, %rhs store <2 x i16> %result, ptr addrspace(1) %out diff --git a/llvm/test/CodeGen/AMDGPU/shufflevector.v4f32.v3f32.ll b/llvm/test/CodeGen/AMDGPU/shufflevector.v4f32.v3f32.ll index d4ee6fa20cad8..7c8417837f788 100644 --- a/llvm/test/CodeGen/AMDGPU/shufflevector.v4f32.v3f32.ll +++ b/llvm/test/CodeGen/AMDGPU/shufflevector.v4f32.v3f32.ll @@ -3272,9 +3272,8 @@ define void @v_shuffle_v4f32_v3f32__1_2_2_2(ptr addrspace(1) inreg %ptr) { ; GFX90A-NEXT: ;;#ASMSTART ; GFX90A-NEXT: ; def v[0:2] ; GFX90A-NEXT: ;;#ASMEND -; GFX90A-NEXT: v_mov_b32_e32 v0, v2 ; GFX90A-NEXT: v_mov_b32_e32 v4, 0 -; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[0:1], v[0:1] op_sel:[1,0] +; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[0:1], v[2:3] op_sel:[1,0] ; GFX90A-NEXT: v_mov_b32_e32 v3, v2 ; GFX90A-NEXT: global_store_dwordx4 v4, v[0:3], s[16:17] ; GFX90A-NEXT: s_waitcnt vmcnt(0) @@ -3287,8 +3286,7 @@ define void @v_shuffle_v4f32_v3f32__1_2_2_2(ptr addrspace(1) inreg %ptr) { ; GFX942-NEXT: ; def v[0:2] ; GFX942-NEXT: ;;#ASMEND ; GFX942-NEXT: v_mov_b32_e32 v4, 0 -; GFX942-NEXT: v_mov_b32_e32 v0, v2 -; GFX942-NEXT: v_pk_mov_b32 v[0:1], v[0:1], v[0:1] op_sel:[1,0] +; GFX942-NEXT: v_pk_mov_b32 v[0:1], v[0:1], v[2:3] op_sel:[1,0] ; GFX942-NEXT: v_mov_b32_e32 v3, v2 ; GFX942-NEXT: global_store_dwordx4 v4, v[0:3], s[0:1] ; GFX942-NEXT: s_waitcnt vmcnt(0) @@ -3416,12 +3414,11 @@ define void @v_shuffle_v4f32_v3f32__4_2_2_2(ptr addrspace(1) inreg %ptr) { ; GFX90A-NEXT: ;;#ASMSTART ; GFX90A-NEXT: ; def v[0:2] ; 
GFX90A-NEXT: ;;#ASMEND -; GFX90A-NEXT: v_mov_b32_e32 v0, v2 ; GFX90A-NEXT: v_mov_b32_e32 v7, 0 ; GFX90A-NEXT: ;;#ASMSTART ; GFX90A-NEXT: ; def v[4:6] ; GFX90A-NEXT: ;;#ASMEND -; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[4:5], v[0:1] op_sel:[1,0] +; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[4:5], v[2:3] op_sel:[1,0] ; GFX90A-NEXT: v_mov_b32_e32 v3, v2 ; GFX90A-NEXT: global_store_dwordx4 v7, v[0:3], s[16:17] ; GFX90A-NEXT: s_waitcnt vmcnt(0) @@ -3434,12 +3431,12 @@ define void @v_shuffle_v4f32_v3f32__4_2_2_2(ptr addrspace(1) inreg %ptr) { ; GFX942-NEXT: ; def v[0:2] ; GFX942-NEXT: ;;#ASMEND ; GFX942-NEXT: v_mov_b32_e32 v7, 0 -; GFX942-NEXT: v_mov_b32_e32 v0, v2 ; GFX942-NEXT: ;;#ASMSTART ; GFX942-NEXT: ; def v[4:6] ; GFX942-NEXT: ;;#ASMEND +; GFX942-NEXT: s_nop 0 +; GFX942-NEXT: v_pk_mov_b32 v[0:1], v[4:5], v[2:3] op_sel:[1,0] ; GFX942-NEXT: v_mov_b32_e32 v3, v2 -; GFX942-NEXT: v_pk_mov_b32 v[0:1], v[4:5], v[0:1] op_sel:[1,0] ; GFX942-NEXT: global_store_dwordx4 v7, v[0:3], s[0:1] ; GFX942-NEXT: s_waitcnt vmcnt(0) ; GFX942-NEXT: s_setpc_b64 s[30:31] @@ -6083,9 +6080,8 @@ define void @v_shuffle_v4f32_v3f32__1_5_5_5(ptr addrspace(1) inreg %ptr) { ; GFX90A-NEXT: ;;#ASMSTART ; GFX90A-NEXT: ; def v[0:2] ; GFX90A-NEXT: ;;#ASMEND -; GFX90A-NEXT: v_mov_b32_e32 v0, v2 ; GFX90A-NEXT: v_mov_b32_e32 v5, 0 -; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[2:3], v[0:1] op_sel:[1,0] +; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[2:3], v[2:3] op_sel:[1,0] ; GFX90A-NEXT: v_mov_b32_e32 v3, v2 ; GFX90A-NEXT: global_store_dwordx4 v5, v[0:3], s[16:17] ; GFX90A-NEXT: s_waitcnt vmcnt(0) @@ -6102,8 +6098,7 @@ define void @v_shuffle_v4f32_v3f32__1_5_5_5(ptr addrspace(1) inreg %ptr) { ; GFX942-NEXT: ; def v[0:2] ; GFX942-NEXT: ;;#ASMEND ; GFX942-NEXT: s_nop 0 -; GFX942-NEXT: v_mov_b32_e32 v0, v2 -; GFX942-NEXT: v_pk_mov_b32 v[0:1], v[2:3], v[0:1] op_sel:[1,0] +; GFX942-NEXT: v_pk_mov_b32 v[0:1], v[2:3], v[2:3] op_sel:[1,0] ; GFX942-NEXT: v_mov_b32_e32 v3, v2 ; GFX942-NEXT: global_store_dwordx4 v5, v[0:3], s[0:1] ; GFX942-NEXT: s_waitcnt vmcnt(0) @@ -6241,9 +6236,8 @@ define void @v_shuffle_v4f32_v3f32__4_5_5_5(ptr addrspace(1) inreg %ptr) { ; GFX90A-NEXT: ;;#ASMSTART ; GFX90A-NEXT: ; def v[0:2] ; GFX90A-NEXT: ;;#ASMEND -; GFX90A-NEXT: v_mov_b32_e32 v0, v2 ; GFX90A-NEXT: v_mov_b32_e32 v4, 0 -; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[0:1], v[0:1] op_sel:[1,0] +; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[0:1], v[2:3] op_sel:[1,0] ; GFX90A-NEXT: v_mov_b32_e32 v3, v2 ; GFX90A-NEXT: global_store_dwordx4 v4, v[0:3], s[16:17] ; GFX90A-NEXT: s_waitcnt vmcnt(0) @@ -6256,8 +6250,7 @@ define void @v_shuffle_v4f32_v3f32__4_5_5_5(ptr addrspace(1) inreg %ptr) { ; GFX942-NEXT: ; def v[0:2] ; GFX942-NEXT: ;;#ASMEND ; GFX942-NEXT: v_mov_b32_e32 v4, 0 -; GFX942-NEXT: v_mov_b32_e32 v0, v2 -; GFX942-NEXT: v_pk_mov_b32 v[0:1], v[0:1], v[0:1] op_sel:[1,0] +; GFX942-NEXT: v_pk_mov_b32 v[0:1], v[0:1], v[2:3] op_sel:[1,0] ; GFX942-NEXT: v_mov_b32_e32 v3, v2 ; GFX942-NEXT: global_store_dwordx4 v4, v[0:3], s[0:1] ; GFX942-NEXT: s_waitcnt vmcnt(0) diff --git a/llvm/test/CodeGen/AMDGPU/shufflevector.v4i32.v3i32.ll b/llvm/test/CodeGen/AMDGPU/shufflevector.v4i32.v3i32.ll index 1a669adf2b635..f7149350e74d3 100644 --- a/llvm/test/CodeGen/AMDGPU/shufflevector.v4i32.v3i32.ll +++ b/llvm/test/CodeGen/AMDGPU/shufflevector.v4i32.v3i32.ll @@ -3272,9 +3272,8 @@ define void @v_shuffle_v4i32_v3i32__1_2_2_2(ptr addrspace(1) inreg %ptr) { ; GFX90A-NEXT: ;;#ASMSTART ; GFX90A-NEXT: ; def v[0:2] ; GFX90A-NEXT: ;;#ASMEND -; GFX90A-NEXT: v_mov_b32_e32 v0, v2 ; GFX90A-NEXT: v_mov_b32_e32 v4, 0 -; GFX90A-NEXT: 
v_pk_mov_b32 v[0:1], v[0:1], v[0:1] op_sel:[1,0] +; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[0:1], v[2:3] op_sel:[1,0] ; GFX90A-NEXT: v_mov_b32_e32 v3, v2 ; GFX90A-NEXT: global_store_dwordx4 v4, v[0:3], s[16:17] ; GFX90A-NEXT: s_waitcnt vmcnt(0) @@ -3287,8 +3286,7 @@ define void @v_shuffle_v4i32_v3i32__1_2_2_2(ptr addrspace(1) inreg %ptr) { ; GFX942-NEXT: ; def v[0:2] ; GFX942-NEXT: ;;#ASMEND ; GFX942-NEXT: v_mov_b32_e32 v4, 0 -; GFX942-NEXT: v_mov_b32_e32 v0, v2 -; GFX942-NEXT: v_pk_mov_b32 v[0:1], v[0:1], v[0:1] op_sel:[1,0] +; GFX942-NEXT: v_pk_mov_b32 v[0:1], v[0:1], v[2:3] op_sel:[1,0] ; GFX942-NEXT: v_mov_b32_e32 v3, v2 ; GFX942-NEXT: global_store_dwordx4 v4, v[0:3], s[0:1] ; GFX942-NEXT: s_waitcnt vmcnt(0) @@ -3416,12 +3414,11 @@ define void @v_shuffle_v4i32_v3i32__4_2_2_2(ptr addrspace(1) inreg %ptr) { ; GFX90A-NEXT: ;;#ASMSTART ; GFX90A-NEXT: ; def v[0:2] ; GFX90A-NEXT: ;;#ASMEND -; GFX90A-NEXT: v_mov_b32_e32 v0, v2 ; GFX90A-NEXT: v_mov_b32_e32 v7, 0 ; GFX90A-NEXT: ;;#ASMSTART ; GFX90A-NEXT: ; def v[4:6] ; GFX90A-NEXT: ;;#ASMEND -; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[4:5], v[0:1] op_sel:[1,0] +; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[4:5], v[2:3] op_sel:[1,0] ; GFX90A-NEXT: v_mov_b32_e32 v3, v2 ; GFX90A-NEXT: global_store_dwordx4 v7, v[0:3], s[16:17] ; GFX90A-NEXT: s_waitcnt vmcnt(0) @@ -3434,12 +3431,12 @@ define void @v_shuffle_v4i32_v3i32__4_2_2_2(ptr addrspace(1) inreg %ptr) { ; GFX942-NEXT: ; def v[0:2] ; GFX942-NEXT: ;;#ASMEND ; GFX942-NEXT: v_mov_b32_e32 v7, 0 -; GFX942-NEXT: v_mov_b32_e32 v0, v2 ; GFX942-NEXT: ;;#ASMSTART ; GFX942-NEXT: ; def v[4:6] ; GFX942-NEXT: ;;#ASMEND +; GFX942-NEXT: s_nop 0 +; GFX942-NEXT: v_pk_mov_b32 v[0:1], v[4:5], v[2:3] op_sel:[1,0] ; GFX942-NEXT: v_mov_b32_e32 v3, v2 -; GFX942-NEXT: v_pk_mov_b32 v[0:1], v[4:5], v[0:1] op_sel:[1,0] ; GFX942-NEXT: global_store_dwordx4 v7, v[0:3], s[0:1] ; GFX942-NEXT: s_waitcnt vmcnt(0) ; GFX942-NEXT: s_setpc_b64 s[30:31] @@ -6083,9 +6080,8 @@ define void @v_shuffle_v4i32_v3i32__1_5_5_5(ptr addrspace(1) inreg %ptr) { ; GFX90A-NEXT: ;;#ASMSTART ; GFX90A-NEXT: ; def v[0:2] ; GFX90A-NEXT: ;;#ASMEND -; GFX90A-NEXT: v_mov_b32_e32 v0, v2 ; GFX90A-NEXT: v_mov_b32_e32 v5, 0 -; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[2:3], v[0:1] op_sel:[1,0] +; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[2:3], v[2:3] op_sel:[1,0] ; GFX90A-NEXT: v_mov_b32_e32 v3, v2 ; GFX90A-NEXT: global_store_dwordx4 v5, v[0:3], s[16:17] ; GFX90A-NEXT: s_waitcnt vmcnt(0) @@ -6102,8 +6098,7 @@ define void @v_shuffle_v4i32_v3i32__1_5_5_5(ptr addrspace(1) inreg %ptr) { ; GFX942-NEXT: ; def v[0:2] ; GFX942-NEXT: ;;#ASMEND ; GFX942-NEXT: s_nop 0 -; GFX942-NEXT: v_mov_b32_e32 v0, v2 -; GFX942-NEXT: v_pk_mov_b32 v[0:1], v[2:3], v[0:1] op_sel:[1,0] +; GFX942-NEXT: v_pk_mov_b32 v[0:1], v[2:3], v[2:3] op_sel:[1,0] ; GFX942-NEXT: v_mov_b32_e32 v3, v2 ; GFX942-NEXT: global_store_dwordx4 v5, v[0:3], s[0:1] ; GFX942-NEXT: s_waitcnt vmcnt(0) @@ -6241,9 +6236,8 @@ define void @v_shuffle_v4i32_v3i32__4_5_5_5(ptr addrspace(1) inreg %ptr) { ; GFX90A-NEXT: ;;#ASMSTART ; GFX90A-NEXT: ; def v[0:2] ; GFX90A-NEXT: ;;#ASMEND -; GFX90A-NEXT: v_mov_b32_e32 v0, v2 ; GFX90A-NEXT: v_mov_b32_e32 v4, 0 -; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[0:1], v[0:1] op_sel:[1,0] +; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[0:1], v[2:3] op_sel:[1,0] ; GFX90A-NEXT: v_mov_b32_e32 v3, v2 ; GFX90A-NEXT: global_store_dwordx4 v4, v[0:3], s[16:17] ; GFX90A-NEXT: s_waitcnt vmcnt(0) @@ -6256,8 +6250,7 @@ define void @v_shuffle_v4i32_v3i32__4_5_5_5(ptr addrspace(1) inreg %ptr) { ; GFX942-NEXT: ; def v[0:2] ; GFX942-NEXT: ;;#ASMEND ; 
GFX942-NEXT: v_mov_b32_e32 v4, 0 -; GFX942-NEXT: v_mov_b32_e32 v0, v2 -; GFX942-NEXT: v_pk_mov_b32 v[0:1], v[0:1], v[0:1] op_sel:[1,0] +; GFX942-NEXT: v_pk_mov_b32 v[0:1], v[0:1], v[2:3] op_sel:[1,0] ; GFX942-NEXT: v_mov_b32_e32 v3, v2 ; GFX942-NEXT: global_store_dwordx4 v4, v[0:3], s[0:1] ; GFX942-NEXT: s_waitcnt vmcnt(0) diff --git a/llvm/test/CodeGen/AMDGPU/shufflevector.v4p3.v3p3.ll b/llvm/test/CodeGen/AMDGPU/shufflevector.v4p3.v3p3.ll index 8039e126590b9..aa9e23b971823 100644 --- a/llvm/test/CodeGen/AMDGPU/shufflevector.v4p3.v3p3.ll +++ b/llvm/test/CodeGen/AMDGPU/shufflevector.v4p3.v3p3.ll @@ -3272,9 +3272,8 @@ define void @v_shuffle_v4p3_v3p3__1_2_2_2(ptr addrspace(1) inreg %ptr) { ; GFX90A-NEXT: ;;#ASMSTART ; GFX90A-NEXT: ; def v[0:2] ; GFX90A-NEXT: ;;#ASMEND -; GFX90A-NEXT: v_mov_b32_e32 v0, v2 ; GFX90A-NEXT: v_mov_b32_e32 v4, 0 -; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[0:1], v[0:1] op_sel:[1,0] +; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[0:1], v[2:3] op_sel:[1,0] ; GFX90A-NEXT: v_mov_b32_e32 v3, v2 ; GFX90A-NEXT: global_store_dwordx4 v4, v[0:3], s[16:17] ; GFX90A-NEXT: s_waitcnt vmcnt(0) @@ -3287,8 +3286,7 @@ define void @v_shuffle_v4p3_v3p3__1_2_2_2(ptr addrspace(1) inreg %ptr) { ; GFX942-NEXT: ; def v[0:2] ; GFX942-NEXT: ;;#ASMEND ; GFX942-NEXT: v_mov_b32_e32 v4, 0 -; GFX942-NEXT: v_mov_b32_e32 v0, v2 -; GFX942-NEXT: v_pk_mov_b32 v[0:1], v[0:1], v[0:1] op_sel:[1,0] +; GFX942-NEXT: v_pk_mov_b32 v[0:1], v[0:1], v[2:3] op_sel:[1,0] ; GFX942-NEXT: v_mov_b32_e32 v3, v2 ; GFX942-NEXT: global_store_dwordx4 v4, v[0:3], s[0:1] ; GFX942-NEXT: s_waitcnt vmcnt(0) @@ -3416,12 +3414,11 @@ define void @v_shuffle_v4p3_v3p3__4_2_2_2(ptr addrspace(1) inreg %ptr) { ; GFX90A-NEXT: ;;#ASMSTART ; GFX90A-NEXT: ; def v[0:2] ; GFX90A-NEXT: ;;#ASMEND -; GFX90A-NEXT: v_mov_b32_e32 v0, v2 ; GFX90A-NEXT: v_mov_b32_e32 v7, 0 ; GFX90A-NEXT: ;;#ASMSTART ; GFX90A-NEXT: ; def v[4:6] ; GFX90A-NEXT: ;;#ASMEND -; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[4:5], v[0:1] op_sel:[1,0] +; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[4:5], v[2:3] op_sel:[1,0] ; GFX90A-NEXT: v_mov_b32_e32 v3, v2 ; GFX90A-NEXT: global_store_dwordx4 v7, v[0:3], s[16:17] ; GFX90A-NEXT: s_waitcnt vmcnt(0) @@ -3434,12 +3431,12 @@ define void @v_shuffle_v4p3_v3p3__4_2_2_2(ptr addrspace(1) inreg %ptr) { ; GFX942-NEXT: ; def v[0:2] ; GFX942-NEXT: ;;#ASMEND ; GFX942-NEXT: v_mov_b32_e32 v7, 0 -; GFX942-NEXT: v_mov_b32_e32 v0, v2 ; GFX942-NEXT: ;;#ASMSTART ; GFX942-NEXT: ; def v[4:6] ; GFX942-NEXT: ;;#ASMEND +; GFX942-NEXT: s_nop 0 +; GFX942-NEXT: v_pk_mov_b32 v[0:1], v[4:5], v[2:3] op_sel:[1,0] ; GFX942-NEXT: v_mov_b32_e32 v3, v2 -; GFX942-NEXT: v_pk_mov_b32 v[0:1], v[4:5], v[0:1] op_sel:[1,0] ; GFX942-NEXT: global_store_dwordx4 v7, v[0:3], s[0:1] ; GFX942-NEXT: s_waitcnt vmcnt(0) ; GFX942-NEXT: s_setpc_b64 s[30:31] @@ -6083,9 +6080,8 @@ define void @v_shuffle_v4p3_v3p3__1_5_5_5(ptr addrspace(1) inreg %ptr) { ; GFX90A-NEXT: ;;#ASMSTART ; GFX90A-NEXT: ; def v[0:2] ; GFX90A-NEXT: ;;#ASMEND -; GFX90A-NEXT: v_mov_b32_e32 v0, v2 ; GFX90A-NEXT: v_mov_b32_e32 v5, 0 -; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[2:3], v[0:1] op_sel:[1,0] +; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[2:3], v[2:3] op_sel:[1,0] ; GFX90A-NEXT: v_mov_b32_e32 v3, v2 ; GFX90A-NEXT: global_store_dwordx4 v5, v[0:3], s[16:17] ; GFX90A-NEXT: s_waitcnt vmcnt(0) @@ -6102,8 +6098,7 @@ define void @v_shuffle_v4p3_v3p3__1_5_5_5(ptr addrspace(1) inreg %ptr) { ; GFX942-NEXT: ; def v[0:2] ; GFX942-NEXT: ;;#ASMEND ; GFX942-NEXT: s_nop 0 -; GFX942-NEXT: v_mov_b32_e32 v0, v2 -; GFX942-NEXT: v_pk_mov_b32 v[0:1], v[2:3], v[0:1] 
op_sel:[1,0] +; GFX942-NEXT: v_pk_mov_b32 v[0:1], v[2:3], v[2:3] op_sel:[1,0] ; GFX942-NEXT: v_mov_b32_e32 v3, v2 ; GFX942-NEXT: global_store_dwordx4 v5, v[0:3], s[0:1] ; GFX942-NEXT: s_waitcnt vmcnt(0) @@ -6241,9 +6236,8 @@ define void @v_shuffle_v4p3_v3p3__4_5_5_5(ptr addrspace(1) inreg %ptr) { ; GFX90A-NEXT: ;;#ASMSTART ; GFX90A-NEXT: ; def v[0:2] ; GFX90A-NEXT: ;;#ASMEND -; GFX90A-NEXT: v_mov_b32_e32 v0, v2 ; GFX90A-NEXT: v_mov_b32_e32 v4, 0 -; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[0:1], v[0:1] op_sel:[1,0] +; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[0:1], v[2:3] op_sel:[1,0] ; GFX90A-NEXT: v_mov_b32_e32 v3, v2 ; GFX90A-NEXT: global_store_dwordx4 v4, v[0:3], s[16:17] ; GFX90A-NEXT: s_waitcnt vmcnt(0) @@ -6256,8 +6250,7 @@ define void @v_shuffle_v4p3_v3p3__4_5_5_5(ptr addrspace(1) inreg %ptr) { ; GFX942-NEXT: ; def v[0:2] ; GFX942-NEXT: ;;#ASMEND ; GFX942-NEXT: v_mov_b32_e32 v4, 0 -; GFX942-NEXT: v_mov_b32_e32 v0, v2 -; GFX942-NEXT: v_pk_mov_b32 v[0:1], v[0:1], v[0:1] op_sel:[1,0] +; GFX942-NEXT: v_pk_mov_b32 v[0:1], v[0:1], v[2:3] op_sel:[1,0] ; GFX942-NEXT: v_mov_b32_e32 v3, v2 ; GFX942-NEXT: global_store_dwordx4 v4, v[0:3], s[0:1] ; GFX942-NEXT: s_waitcnt vmcnt(0) diff --git a/llvm/test/CodeGen/AMDGPU/sign_extend.ll b/llvm/test/CodeGen/AMDGPU/sign_extend.ll index cb8bbde71f146..ece46b59ba49e 100644 --- a/llvm/test/CodeGen/AMDGPU/sign_extend.ll +++ b/llvm/test/CodeGen/AMDGPU/sign_extend.ll @@ -6,29 +6,27 @@ define amdgpu_kernel void @s_sext_i1_to_i32(ptr addrspace(1) %out, i32 %a, i32 % ; SI-LABEL: s_sext_i1_to_i32: ; SI: ; %bb.0: ; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9 -; SI-NEXT: s_mov_b32 s7, 0xf000 -; SI-NEXT: s_mov_b32 s6, -1 ; SI-NEXT: s_waitcnt lgkmcnt(0) -; SI-NEXT: s_cmp_eq_u32 s2, s3 -; SI-NEXT: s_mov_b32 s4, s0 -; SI-NEXT: s_mov_b32 s5, s1 -; SI-NEXT: s_cselect_b64 s[0:1], -1, 0 -; SI-NEXT: v_cndmask_b32_e64 v0, 0, -1, s[0:1] -; SI-NEXT: buffer_store_dword v0, off, s[4:7], 0 +; SI-NEXT: s_mov_b64 s[4:5], s[2:3] +; SI-NEXT: s_cmp_eq_u32 s4, s5 +; SI-NEXT: s_cselect_b64 s[4:5], -1, 0 +; SI-NEXT: s_mov_b32 s3, 0xf000 +; SI-NEXT: s_mov_b32 s2, -1 +; SI-NEXT: v_cndmask_b32_e64 v0, 0, -1, s[4:5] +; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0 ; SI-NEXT: s_endpgm ; ; VI-LABEL: s_sext_i1_to_i32: ; VI: ; %bb.0: ; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24 -; VI-NEXT: s_mov_b32 s7, 0xf000 -; VI-NEXT: s_mov_b32 s6, -1 ; VI-NEXT: s_waitcnt lgkmcnt(0) -; VI-NEXT: s_cmp_eq_u32 s2, s3 -; VI-NEXT: s_mov_b32 s4, s0 -; VI-NEXT: s_mov_b32 s5, s1 -; VI-NEXT: s_cselect_b64 s[0:1], -1, 0 -; VI-NEXT: v_cndmask_b32_e64 v0, 0, -1, s[0:1] -; VI-NEXT: buffer_store_dword v0, off, s[4:7], 0 +; VI-NEXT: s_mov_b64 s[4:5], s[2:3] +; VI-NEXT: s_cmp_eq_u32 s4, s5 +; VI-NEXT: s_cselect_b64 s[4:5], -1, 0 +; VI-NEXT: s_mov_b32 s3, 0xf000 +; VI-NEXT: s_mov_b32 s2, -1 +; VI-NEXT: v_cndmask_b32_e64 v0, 0, -1, s[4:5] +; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0 ; VI-NEXT: s_endpgm %cmp = icmp eq i32 %a, %b %sext = sext i1 %cmp to i32 @@ -78,31 +76,29 @@ define amdgpu_kernel void @s_sext_i1_to_i64(ptr addrspace(1) %out, i32 %a, i32 % ; SI-LABEL: s_sext_i1_to_i64: ; SI: ; %bb.0: ; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9 -; SI-NEXT: s_mov_b32 s7, 0xf000 -; SI-NEXT: s_mov_b32 s6, -1 ; SI-NEXT: s_waitcnt lgkmcnt(0) -; SI-NEXT: s_cmp_eq_u32 s2, s3 -; SI-NEXT: s_mov_b32 s4, s0 -; SI-NEXT: s_mov_b32 s5, s1 -; SI-NEXT: s_cselect_b64 s[0:1], -1, 0 -; SI-NEXT: v_cndmask_b32_e64 v0, 0, -1, s[0:1] +; SI-NEXT: s_mov_b64 s[4:5], s[2:3] +; SI-NEXT: s_cmp_eq_u32 s4, s5 +; SI-NEXT: s_cselect_b64 s[4:5], -1, 0 +; 
SI-NEXT: v_cndmask_b32_e64 v0, 0, -1, s[4:5] +; SI-NEXT: s_mov_b32 s3, 0xf000 +; SI-NEXT: s_mov_b32 s2, -1 ; SI-NEXT: v_mov_b32_e32 v1, v0 -; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0 +; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0 ; SI-NEXT: s_endpgm ; ; VI-LABEL: s_sext_i1_to_i64: ; VI: ; %bb.0: ; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24 -; VI-NEXT: s_mov_b32 s7, 0xf000 -; VI-NEXT: s_mov_b32 s6, -1 ; VI-NEXT: s_waitcnt lgkmcnt(0) -; VI-NEXT: s_cmp_eq_u32 s2, s3 -; VI-NEXT: s_mov_b32 s4, s0 -; VI-NEXT: s_mov_b32 s5, s1 -; VI-NEXT: s_cselect_b64 s[0:1], -1, 0 -; VI-NEXT: v_cndmask_b32_e64 v0, 0, -1, s[0:1] +; VI-NEXT: s_mov_b64 s[4:5], s[2:3] +; VI-NEXT: s_cmp_eq_u32 s4, s5 +; VI-NEXT: s_cselect_b64 s[4:5], -1, 0 +; VI-NEXT: v_cndmask_b32_e64 v0, 0, -1, s[4:5] +; VI-NEXT: s_mov_b32 s3, 0xf000 +; VI-NEXT: s_mov_b32 s2, -1 ; VI-NEXT: v_mov_b32_e32 v1, v0 -; VI-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0 +; VI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0 ; VI-NEXT: s_endpgm %cmp = icmp eq i32 %a, %b %sext = sext i1 %cmp to i64 @@ -218,29 +214,27 @@ define amdgpu_kernel void @s_sext_i1_to_i16(ptr addrspace(1) %out, i32 %a, i32 % ; SI-LABEL: s_sext_i1_to_i16: ; SI: ; %bb.0: ; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9 -; SI-NEXT: s_mov_b32 s7, 0xf000 -; SI-NEXT: s_mov_b32 s6, -1 ; SI-NEXT: s_waitcnt lgkmcnt(0) -; SI-NEXT: s_cmp_eq_u32 s2, s3 -; SI-NEXT: s_mov_b32 s4, s0 -; SI-NEXT: s_mov_b32 s5, s1 -; SI-NEXT: s_cselect_b64 s[0:1], -1, 0 -; SI-NEXT: v_cndmask_b32_e64 v0, 0, -1, s[0:1] -; SI-NEXT: buffer_store_short v0, off, s[4:7], 0 +; SI-NEXT: s_mov_b64 s[4:5], s[2:3] +; SI-NEXT: s_cmp_eq_u32 s4, s5 +; SI-NEXT: s_cselect_b64 s[4:5], -1, 0 +; SI-NEXT: s_mov_b32 s3, 0xf000 +; SI-NEXT: s_mov_b32 s2, -1 +; SI-NEXT: v_cndmask_b32_e64 v0, 0, -1, s[4:5] +; SI-NEXT: buffer_store_short v0, off, s[0:3], 0 ; SI-NEXT: s_endpgm ; ; VI-LABEL: s_sext_i1_to_i16: ; VI: ; %bb.0: ; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24 -; VI-NEXT: s_mov_b32 s7, 0xf000 -; VI-NEXT: s_mov_b32 s6, -1 ; VI-NEXT: s_waitcnt lgkmcnt(0) -; VI-NEXT: s_cmp_eq_u32 s2, s3 -; VI-NEXT: s_mov_b32 s4, s0 -; VI-NEXT: s_mov_b32 s5, s1 -; VI-NEXT: s_cselect_b64 s[0:1], -1, 0 -; VI-NEXT: v_cndmask_b32_e64 v0, 0, -1, s[0:1] -; VI-NEXT: buffer_store_short v0, off, s[4:7], 0 +; VI-NEXT: s_mov_b64 s[4:5], s[2:3] +; VI-NEXT: s_cmp_eq_u32 s4, s5 +; VI-NEXT: s_cselect_b64 s[4:5], -1, 0 +; VI-NEXT: s_mov_b32 s3, 0xf000 +; VI-NEXT: s_mov_b32 s2, -1 +; VI-NEXT: v_cndmask_b32_e64 v0, 0, -1, s[4:5] +; VI-NEXT: buffer_store_short v0, off, s[0:3], 0 ; VI-NEXT: s_endpgm %cmp = icmp eq i32 %a, %b %sext = sext i1 %cmp to i16 diff --git a/llvm/test/CodeGen/AMDGPU/skip-if-dead.ll b/llvm/test/CodeGen/AMDGPU/skip-if-dead.ll index 5461532184fc5..e836366fd8dbf 100644 --- a/llvm/test/CodeGen/AMDGPU/skip-if-dead.ll +++ b/llvm/test/CodeGen/AMDGPU/skip-if-dead.ll @@ -1797,8 +1797,8 @@ define amdgpu_ps void @complex_loop(i32 inreg %cmpa, i32 %cmpb, i32 %cmpc) { ; GFX10-WAVE32-NEXT: s_cbranch_scc1 .LBB15_7 ; GFX10-WAVE32-NEXT: ; %bb.1: ; %.lr.ph ; GFX10-WAVE32-NEXT: s_mov_b32 s1, exec_lo -; GFX10-WAVE32-NEXT: s_mov_b32 s0, 0 ; GFX10-WAVE32-NEXT: s_mov_b32 s2, 0 +; GFX10-WAVE32-NEXT: s_mov_b32 s0, 0 ; GFX10-WAVE32-NEXT: s_branch .LBB15_3 ; GFX10-WAVE32-NEXT: .LBB15_2: ; %latch ; GFX10-WAVE32-NEXT: ; in Loop: Header=BB15_3 Depth=1 diff --git a/llvm/test/CodeGen/AMDGPU/sminmax.v2i16.ll b/llvm/test/CodeGen/AMDGPU/sminmax.v2i16.ll index 47998767a948c..76f8f484fc763 100644 --- a/llvm/test/CodeGen/AMDGPU/sminmax.v2i16.ll +++ 
b/llvm/test/CodeGen/AMDGPU/sminmax.v2i16.ll @@ -369,42 +369,41 @@ define amdgpu_kernel void @s_abs_v4i16(ptr addrspace(1) %out, <4 x i16> %val) #0 ; CI-LABEL: s_abs_v4i16: ; CI: ; %bb.0: ; CI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9 -; CI-NEXT: s_mov_b32 s7, 0xf000 -; CI-NEXT: s_mov_b32 s6, -1 ; CI-NEXT: s_waitcnt lgkmcnt(0) -; CI-NEXT: s_mov_b32 s4, s0 -; CI-NEXT: s_mov_b32 s5, s1 -; CI-NEXT: s_ashr_i32 s0, s3, 16 -; CI-NEXT: s_ashr_i32 s1, s2, 16 -; CI-NEXT: s_lshr_b32 s8, s2, 16 -; CI-NEXT: s_lshr_b32 s9, s3, 16 -; CI-NEXT: s_sext_i32_i16 s10, s3 -; CI-NEXT: s_sext_i32_i16 s11, s2 -; CI-NEXT: s_sub_i32 s3, 0, s3 -; CI-NEXT: s_sub_i32 s2, 0, s2 -; CI-NEXT: s_sext_i32_i16 s3, s3 -; CI-NEXT: s_sext_i32_i16 s2, s2 +; CI-NEXT: s_mov_b64 s[4:5], s[2:3] +; CI-NEXT: s_ashr_i32 s6, s5, 16 +; CI-NEXT: s_lshr_b32 s9, s5, 16 +; CI-NEXT: s_sext_i32_i16 s10, s5 +; CI-NEXT: s_sub_i32 s5, 0, s5 +; CI-NEXT: s_ashr_i32 s7, s4, 16 +; CI-NEXT: s_lshr_b32 s8, s4, 16 +; CI-NEXT: s_sext_i32_i16 s11, s4 +; CI-NEXT: s_sext_i32_i16 s5, s5 +; CI-NEXT: s_sub_i32 s4, 0, s4 ; CI-NEXT: s_sub_i32 s9, 0, s9 -; CI-NEXT: s_sub_i32 s8, 0, s8 +; CI-NEXT: s_sext_i32_i16 s4, s4 ; CI-NEXT: s_sext_i32_i16 s9, s9 +; CI-NEXT: s_sub_i32 s8, 0, s8 +; CI-NEXT: s_max_i32 s5, s10, s5 ; CI-NEXT: s_sext_i32_i16 s8, s8 -; CI-NEXT: s_max_i32 s2, s11, s2 -; CI-NEXT: s_max_i32 s3, s10, s3 -; CI-NEXT: s_max_i32 s1, s1, s8 -; CI-NEXT: s_max_i32 s0, s0, s9 -; CI-NEXT: s_add_i32 s3, s3, 2 -; CI-NEXT: s_add_i32 s2, s2, 2 -; CI-NEXT: s_lshl_b32 s0, s0, 16 -; CI-NEXT: s_and_b32 s3, s3, 0xffff -; CI-NEXT: s_lshl_b32 s1, s1, 16 -; CI-NEXT: s_and_b32 s2, s2, 0xffff -; CI-NEXT: s_or_b32 s0, s0, s3 -; CI-NEXT: s_or_b32 s1, s1, s2 -; CI-NEXT: s_add_i32 s0, s0, 0x20000 -; CI-NEXT: s_add_i32 s1, s1, 0x20000 -; CI-NEXT: v_mov_b32_e32 v0, s1 -; CI-NEXT: v_mov_b32_e32 v1, s0 -; CI-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0 +; CI-NEXT: s_max_i32 s6, s6, s9 +; CI-NEXT: s_max_i32 s4, s11, s4 +; CI-NEXT: s_add_i32 s5, s5, 2 +; CI-NEXT: s_max_i32 s7, s7, s8 +; CI-NEXT: s_lshl_b32 s6, s6, 16 +; CI-NEXT: s_and_b32 s5, s5, 0xffff +; CI-NEXT: s_add_i32 s4, s4, 2 +; CI-NEXT: s_or_b32 s5, s6, s5 +; CI-NEXT: s_lshl_b32 s6, s7, 16 +; CI-NEXT: s_and_b32 s4, s4, 0xffff +; CI-NEXT: s_or_b32 s4, s6, s4 +; CI-NEXT: s_add_i32 s5, s5, 0x20000 +; CI-NEXT: s_add_i32 s4, s4, 0x20000 +; CI-NEXT: s_mov_b32 s3, 0xf000 +; CI-NEXT: s_mov_b32 s2, -1 +; CI-NEXT: v_mov_b32_e32 v0, s4 +; CI-NEXT: v_mov_b32_e32 v1, s5 +; CI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0 ; CI-NEXT: s_endpgm %z0 = insertelement <4 x i16> poison, i16 0, i16 0 %z1 = insertelement <4 x i16> %z0, i16 0, i16 1 diff --git a/llvm/test/CodeGen/AMDGPU/sub.ll b/llvm/test/CodeGen/AMDGPU/sub.ll index 5c113d80a9c80..0a5160145fbd8 100644 --- a/llvm/test/CodeGen/AMDGPU/sub.ll +++ b/llvm/test/CodeGen/AMDGPU/sub.ll @@ -11,14 +11,13 @@ define amdgpu_kernel void @s_sub_i32(ptr addrspace(1) %out, i32 %a, i32 %b) { ; GFX6-LABEL: s_sub_i32: ; GFX6: ; %bb.0: ; GFX6-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9 -; GFX6-NEXT: s_mov_b32 s7, 0xf000 -; GFX6-NEXT: s_mov_b32 s6, -1 ; GFX6-NEXT: s_waitcnt lgkmcnt(0) -; GFX6-NEXT: s_mov_b32 s4, s0 -; GFX6-NEXT: s_sub_i32 s0, s2, s3 -; GFX6-NEXT: s_mov_b32 s5, s1 -; GFX6-NEXT: v_mov_b32_e32 v0, s0 -; GFX6-NEXT: buffer_store_dword v0, off, s[4:7], 0 +; GFX6-NEXT: s_mov_b64 s[4:5], s[2:3] +; GFX6-NEXT: s_sub_i32 s4, s4, s5 +; GFX6-NEXT: s_mov_b32 s3, 0xf000 +; GFX6-NEXT: s_mov_b32 s2, -1 +; GFX6-NEXT: v_mov_b32_e32 v0, s4 +; GFX6-NEXT: buffer_store_dword v0, off, s[0:3], 0 ; GFX6-NEXT: 
s_endpgm ; ; GFX8-LABEL: s_sub_i32: diff --git a/llvm/test/CodeGen/AMDGPU/sub.v2i16.ll b/llvm/test/CodeGen/AMDGPU/sub.v2i16.ll index 6a273e55fd9a8..82ef28f7339b8 100644 --- a/llvm/test/CodeGen/AMDGPU/sub.v2i16.ll +++ b/llvm/test/CodeGen/AMDGPU/sub.v2i16.ll @@ -223,44 +223,39 @@ define amdgpu_kernel void @s_test_sub_v2i16_kernarg(ptr addrspace(1) %out, <2 x ; VI-LABEL: s_test_sub_v2i16_kernarg: ; VI: ; %bb.0: ; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24 -; VI-NEXT: s_mov_b32 s7, 0xf000 -; VI-NEXT: s_mov_b32 s6, -1 ; VI-NEXT: s_waitcnt lgkmcnt(0) -; VI-NEXT: s_mov_b32 s4, s0 -; VI-NEXT: s_mov_b32 s5, s1 -; VI-NEXT: s_lshr_b32 s0, s3, 16 -; VI-NEXT: s_lshr_b32 s1, s2, 16 -; VI-NEXT: s_sub_i32 s0, s1, s0 -; VI-NEXT: s_sub_i32 s1, s2, s3 -; VI-NEXT: s_lshl_b32 s0, s0, 16 -; VI-NEXT: s_and_b32 s1, s1, 0xffff -; VI-NEXT: s_or_b32 s0, s1, s0 -; VI-NEXT: v_mov_b32_e32 v0, s0 -; VI-NEXT: buffer_store_dword v0, off, s[4:7], 0 +; VI-NEXT: s_mov_b64 s[4:5], s[2:3] +; VI-NEXT: s_lshr_b32 s6, s5, 16 +; VI-NEXT: s_lshr_b32 s7, s4, 16 +; VI-NEXT: s_sub_i32 s4, s4, s5 +; VI-NEXT: s_sub_i32 s5, s7, s6 +; VI-NEXT: s_lshl_b32 s5, s5, 16 +; VI-NEXT: s_and_b32 s4, s4, 0xffff +; VI-NEXT: s_or_b32 s4, s4, s5 +; VI-NEXT: s_mov_b32 s3, 0xf000 +; VI-NEXT: s_mov_b32 s2, -1 +; VI-NEXT: v_mov_b32_e32 v0, s4 +; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0 ; VI-NEXT: s_endpgm ; ; GFX10-LABEL: s_test_sub_v2i16_kernarg: ; GFX10: ; %bb.0: ; GFX10-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24 -; GFX10-NEXT: s_mov_b32 s7, 0x31016000 -; GFX10-NEXT: s_mov_b32 s6, -1 ; GFX10-NEXT: s_waitcnt lgkmcnt(0) ; GFX10-NEXT: v_pk_sub_i16 v0, s2, s3 -; GFX10-NEXT: s_mov_b32 s4, s0 -; GFX10-NEXT: s_mov_b32 s5, s1 -; GFX10-NEXT: buffer_store_dword v0, off, s[4:7], 0 +; GFX10-NEXT: s_mov_b32 s3, 0x31016000 +; GFX10-NEXT: s_mov_b32 s2, -1 +; GFX10-NEXT: buffer_store_dword v0, off, s[0:3], 0 ; GFX10-NEXT: s_endpgm ; ; GFX11-LABEL: s_test_sub_v2i16_kernarg: ; GFX11: ; %bb.0: ; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 -; GFX11-NEXT: s_mov_b32 s7, 0x31016000 -; GFX11-NEXT: s_mov_b32 s6, -1 ; GFX11-NEXT: s_waitcnt lgkmcnt(0) ; GFX11-NEXT: v_pk_sub_i16 v0, s2, s3 -; GFX11-NEXT: s_mov_b32 s4, s0 -; GFX11-NEXT: s_mov_b32 s5, s1 -; GFX11-NEXT: buffer_store_b32 v0, off, s[4:7], 0 +; GFX11-NEXT: s_mov_b32 s3, 0x31016000 +; GFX11-NEXT: s_mov_b32 s2, -1 +; GFX11-NEXT: buffer_store_b32 v0, off, s[0:3], 0 ; GFX11-NEXT: s_endpgm %add = sub <2 x i16> %a, %b store <2 x i16> %add, ptr addrspace(1) %out diff --git a/llvm/test/CodeGen/AMDGPU/udiv.ll b/llvm/test/CodeGen/AMDGPU/udiv.ll index 063c56faf9ce4..1f93bf7a68972 100644 --- a/llvm/test/CodeGen/AMDGPU/udiv.ll +++ b/llvm/test/CodeGen/AMDGPU/udiv.ll @@ -189,67 +189,65 @@ define amdgpu_kernel void @s_udiv_i32(ptr addrspace(1) %out, i32 %a, i32 %b) { ; SI-LABEL: s_udiv_i32: ; SI: ; %bb.0: ; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9 -; SI-NEXT: s_mov_b32 s7, 0xf000 -; SI-NEXT: s_mov_b32 s6, -1 ; SI-NEXT: s_waitcnt lgkmcnt(0) -; SI-NEXT: v_cvt_f32_u32_e32 v0, s3 -; SI-NEXT: s_sub_i32 s4, 0, s3 -; SI-NEXT: s_mov_b32 s5, s1 +; SI-NEXT: s_mov_b64 s[4:5], s[2:3] +; SI-NEXT: v_cvt_f32_u32_e32 v0, s5 +; SI-NEXT: s_sub_i32 s2, 0, s5 +; SI-NEXT: s_mov_b32 s3, 0xf000 ; SI-NEXT: v_rcp_iflag_f32_e32 v0, v0 ; SI-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0 ; SI-NEXT: v_cvt_u32_f32_e32 v0, v0 -; SI-NEXT: v_mul_lo_u32 v1, s4, v0 -; SI-NEXT: s_mov_b32 s4, s0 +; SI-NEXT: v_mul_lo_u32 v1, s2, v0 +; SI-NEXT: s_mov_b32 s2, -1 ; SI-NEXT: v_mul_hi_u32 v1, v0, v1 ; SI-NEXT: v_add_i32_e32 v0, vcc, v0, v1 -; SI-NEXT: v_mul_hi_u32 v0, s2, v0 
-; SI-NEXT: v_readfirstlane_b32 s0, v0 -; SI-NEXT: s_mul_i32 s0, s0, s3 -; SI-NEXT: s_sub_i32 s0, s2, s0 -; SI-NEXT: s_sub_i32 s1, s0, s3 +; SI-NEXT: v_mul_hi_u32 v0, s4, v0 +; SI-NEXT: v_readfirstlane_b32 s6, v0 +; SI-NEXT: s_mul_i32 s6, s6, s5 +; SI-NEXT: s_sub_i32 s4, s4, s6 +; SI-NEXT: s_sub_i32 s6, s4, s5 ; SI-NEXT: v_add_i32_e32 v1, vcc, 1, v0 -; SI-NEXT: s_cmp_ge_u32 s0, s3 +; SI-NEXT: s_cmp_ge_u32 s4, s5 ; SI-NEXT: s_cselect_b64 vcc, -1, 0 ; SI-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc -; SI-NEXT: s_cselect_b32 s0, s1, s0 +; SI-NEXT: s_cselect_b32 s4, s6, s4 ; SI-NEXT: v_add_i32_e32 v1, vcc, 1, v0 -; SI-NEXT: s_cmp_ge_u32 s0, s3 +; SI-NEXT: s_cmp_ge_u32 s4, s5 ; SI-NEXT: s_cselect_b64 vcc, -1, 0 ; SI-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc -; SI-NEXT: buffer_store_dword v0, off, s[4:7], 0 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0 ; SI-NEXT: s_endpgm ; ; VI-LABEL: s_udiv_i32: ; VI: ; %bb.0: ; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24 -; VI-NEXT: s_mov_b32 s7, 0xf000 -; VI-NEXT: s_mov_b32 s6, -1 ; VI-NEXT: s_waitcnt lgkmcnt(0) -; VI-NEXT: v_cvt_f32_u32_e32 v0, s3 -; VI-NEXT: s_sub_i32 s4, 0, s3 -; VI-NEXT: s_mov_b32 s5, s1 +; VI-NEXT: s_mov_b64 s[4:5], s[2:3] +; VI-NEXT: v_cvt_f32_u32_e32 v0, s5 +; VI-NEXT: s_sub_i32 s2, 0, s5 +; VI-NEXT: s_mov_b32 s3, 0xf000 ; VI-NEXT: v_rcp_iflag_f32_e32 v0, v0 ; VI-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0 ; VI-NEXT: v_cvt_u32_f32_e32 v0, v0 -; VI-NEXT: v_mul_lo_u32 v1, s4, v0 -; VI-NEXT: s_mov_b32 s4, s0 +; VI-NEXT: v_mul_lo_u32 v1, s2, v0 +; VI-NEXT: s_mov_b32 s2, -1 ; VI-NEXT: v_mul_hi_u32 v1, v0, v1 ; VI-NEXT: v_add_u32_e32 v0, vcc, v0, v1 -; VI-NEXT: v_mul_hi_u32 v0, s2, v0 -; VI-NEXT: v_readfirstlane_b32 s0, v0 -; VI-NEXT: s_mul_i32 s0, s0, s3 -; VI-NEXT: s_sub_i32 s0, s2, s0 -; VI-NEXT: s_sub_i32 s1, s0, s3 +; VI-NEXT: v_mul_hi_u32 v0, s4, v0 +; VI-NEXT: v_readfirstlane_b32 s6, v0 +; VI-NEXT: s_mul_i32 s6, s6, s5 +; VI-NEXT: s_sub_i32 s4, s4, s6 +; VI-NEXT: s_sub_i32 s6, s4, s5 ; VI-NEXT: v_add_u32_e32 v1, vcc, 1, v0 -; VI-NEXT: s_cmp_ge_u32 s0, s3 +; VI-NEXT: s_cmp_ge_u32 s4, s5 ; VI-NEXT: s_cselect_b64 vcc, -1, 0 ; VI-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc -; VI-NEXT: s_cselect_b32 s0, s1, s0 +; VI-NEXT: s_cselect_b32 s4, s6, s4 ; VI-NEXT: v_add_u32_e32 v1, vcc, 1, v0 -; VI-NEXT: s_cmp_ge_u32 s0, s3 +; VI-NEXT: s_cmp_ge_u32 s4, s5 ; VI-NEXT: s_cselect_b64 vcc, -1, 0 ; VI-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc -; VI-NEXT: buffer_store_dword v0, off, s[4:7], 0 +; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0 ; VI-NEXT: s_endpgm ; ; GCN-LABEL: s_udiv_i32: diff --git a/llvm/test/CodeGen/AMDGPU/udiv64.ll b/llvm/test/CodeGen/AMDGPU/udiv64.ll index 775483c040b7f..1c50f930facba 100644 --- a/llvm/test/CodeGen/AMDGPU/udiv64.ll +++ b/llvm/test/CodeGen/AMDGPU/udiv64.ll @@ -716,8 +716,6 @@ define amdgpu_kernel void @s_test_udiv24_i48(ptr addrspace(1) %out, i48 %x, i48 ; GCN: ; %bb.0: ; GCN-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9 ; GCN-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0xd -; GCN-NEXT: s_mov_b32 s7, 0xf000 -; GCN-NEXT: s_mov_b32 s6, -1 ; GCN-NEXT: v_mov_b32_e32 v3, 0 ; GCN-NEXT: s_waitcnt lgkmcnt(0) ; GCN-NEXT: s_and_b32 s3, s3, 0xffff @@ -729,25 +727,23 @@ define amdgpu_kernel void @s_test_udiv24_i48(ptr addrspace(1) %out, i48 %x, i48 ; GCN-NEXT: s_lshr_b64 s[2:3], s[2:3], 24 ; GCN-NEXT: v_cvt_f32_u32_e32 v1, s2 ; GCN-NEXT: v_rcp_iflag_f32_e32 v2, v0 -; GCN-NEXT: s_mov_b32 s4, s0 -; GCN-NEXT: s_mov_b32 s5, s1 +; GCN-NEXT: s_mov_b32 s3, 0xf000 +; GCN-NEXT: s_mov_b32 s2, -1 ; GCN-NEXT: v_mul_f32_e32 v2, v1, v2 ; GCN-NEXT: v_trunc_f32_e32 
v2, v2 +; GCN-NEXT: v_cvt_u32_f32_e32 v4, v2 ; GCN-NEXT: v_mad_f32 v1, -v2, v0, v1 -; GCN-NEXT: v_cvt_u32_f32_e32 v2, v2 ; GCN-NEXT: v_cmp_ge_f32_e64 vcc, |v1|, v0 -; GCN-NEXT: v_addc_u32_e32 v0, vcc, 0, v2, vcc +; GCN-NEXT: v_addc_u32_e32 v0, vcc, 0, v4, vcc ; GCN-NEXT: v_and_b32_e32 v0, 0xffffff, v0 -; GCN-NEXT: buffer_store_short v3, off, s[4:7], 0 offset:4 -; GCN-NEXT: buffer_store_dword v0, off, s[4:7], 0 +; GCN-NEXT: buffer_store_short v3, off, s[0:3], 0 offset:4 +; GCN-NEXT: buffer_store_dword v0, off, s[0:3], 0 ; GCN-NEXT: s_endpgm ; ; GCN-IR-LABEL: s_test_udiv24_i48: ; GCN-IR: ; %bb.0: ; GCN-IR-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9 ; GCN-IR-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0xd -; GCN-IR-NEXT: s_mov_b32 s7, 0xf000 -; GCN-IR-NEXT: s_mov_b32 s6, -1 ; GCN-IR-NEXT: v_mov_b32_e32 v3, 0 ; GCN-IR-NEXT: s_waitcnt lgkmcnt(0) ; GCN-IR-NEXT: s_and_b32 s3, s3, 0xffff @@ -759,17 +755,17 @@ define amdgpu_kernel void @s_test_udiv24_i48(ptr addrspace(1) %out, i48 %x, i48 ; GCN-IR-NEXT: s_lshr_b64 s[2:3], s[2:3], 24 ; GCN-IR-NEXT: v_cvt_f32_u32_e32 v1, s2 ; GCN-IR-NEXT: v_rcp_iflag_f32_e32 v2, v0 -; GCN-IR-NEXT: s_mov_b32 s4, s0 -; GCN-IR-NEXT: s_mov_b32 s5, s1 +; GCN-IR-NEXT: s_mov_b32 s3, 0xf000 +; GCN-IR-NEXT: s_mov_b32 s2, -1 ; GCN-IR-NEXT: v_mul_f32_e32 v2, v1, v2 ; GCN-IR-NEXT: v_trunc_f32_e32 v2, v2 +; GCN-IR-NEXT: v_cvt_u32_f32_e32 v4, v2 ; GCN-IR-NEXT: v_mad_f32 v1, -v2, v0, v1 -; GCN-IR-NEXT: v_cvt_u32_f32_e32 v2, v2 ; GCN-IR-NEXT: v_cmp_ge_f32_e64 vcc, |v1|, v0 -; GCN-IR-NEXT: v_addc_u32_e32 v0, vcc, 0, v2, vcc +; GCN-IR-NEXT: v_addc_u32_e32 v0, vcc, 0, v4, vcc ; GCN-IR-NEXT: v_and_b32_e32 v0, 0xffffff, v0 -; GCN-IR-NEXT: buffer_store_short v3, off, s[4:7], 0 offset:4 -; GCN-IR-NEXT: buffer_store_dword v0, off, s[4:7], 0 +; GCN-IR-NEXT: buffer_store_short v3, off, s[0:3], 0 offset:4 +; GCN-IR-NEXT: buffer_store_dword v0, off, s[0:3], 0 ; GCN-IR-NEXT: s_endpgm %1 = lshr i48 %x, 24 %2 = lshr i48 %y, 24 diff --git a/llvm/test/CodeGen/AMDGPU/while-break.ll b/llvm/test/CodeGen/AMDGPU/while-break.ll index 19c8e842a1390..2b7e28362724b 100644 --- a/llvm/test/CodeGen/AMDGPU/while-break.ll +++ b/llvm/test/CodeGen/AMDGPU/while-break.ll @@ -157,8 +157,8 @@ define amdgpu_ps < 2 x float> @while_break_two_chains_of_phi(float %v, i32 %x, i ; GCN-LABEL: while_break_two_chains_of_phi: ; GCN: ; %bb.0: ; %entry ; GCN-NEXT: v_mov_b32_e32 v6, 0 -; GCN-NEXT: s_mov_b32 s2, 0 ; GCN-NEXT: s_mov_b32 s0, 0 +; GCN-NEXT: s_mov_b32 s2, 0 ; GCN-NEXT: s_branch .LBB2_2 ; GCN-NEXT: .LBB2_1: ; %Flow1 ; GCN-NEXT: ; in Loop: Header=BB2_2 Depth=1 diff --git a/llvm/test/CodeGen/AMDGPU/xor.ll b/llvm/test/CodeGen/AMDGPU/xor.ll index feb6ecd996516..92280b9ad8acf 100644 --- a/llvm/test/CodeGen/AMDGPU/xor.ll +++ b/llvm/test/CodeGen/AMDGPU/xor.ll @@ -298,14 +298,13 @@ define amdgpu_kernel void @scalar_xor_i32(ptr addrspace(1) %out, i32 %a, i32 %b) ; SI-LABEL: scalar_xor_i32: ; SI: ; %bb.0: ; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9 -; SI-NEXT: s_mov_b32 s7, 0xf000 -; SI-NEXT: s_mov_b32 s6, -1 ; SI-NEXT: s_waitcnt lgkmcnt(0) -; SI-NEXT: s_mov_b32 s4, s0 -; SI-NEXT: s_xor_b32 s0, s2, s3 -; SI-NEXT: s_mov_b32 s5, s1 -; SI-NEXT: v_mov_b32_e32 v0, s0 -; SI-NEXT: buffer_store_dword v0, off, s[4:7], 0 +; SI-NEXT: s_mov_b64 s[4:5], s[2:3] +; SI-NEXT: s_xor_b32 s4, s4, s5 +; SI-NEXT: s_mov_b32 s3, 0xf000 +; SI-NEXT: s_mov_b32 s2, -1 +; SI-NEXT: v_mov_b32_e32 v0, s4 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0 ; SI-NEXT: s_endpgm ; ; VI-LABEL: scalar_xor_i32: diff --git a/llvm/test/CodeGen/AMDGPU/zext-divergence-driven-isel.ll 
b/llvm/test/CodeGen/AMDGPU/zext-divergence-driven-isel.ll index c3935821c31dd..d9f5ba92e116d 100644 --- a/llvm/test/CodeGen/AMDGPU/zext-divergence-driven-isel.ll +++ b/llvm/test/CodeGen/AMDGPU/zext-divergence-driven-isel.ll @@ -5,15 +5,14 @@ define amdgpu_kernel void @zext_i16_to_i32_uniform(ptr addrspace(1) %out, i16 %a ; GCN-LABEL: zext_i16_to_i32_uniform: ; GCN: ; %bb.0: ; GCN-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9 -; GCN-NEXT: s_mov_b32 s7, 0xf000 -; GCN-NEXT: s_mov_b32 s6, -1 ; GCN-NEXT: s_waitcnt lgkmcnt(0) -; GCN-NEXT: s_mov_b32 s4, s0 -; GCN-NEXT: s_and_b32 s0, s2, 0xffff -; GCN-NEXT: s_add_i32 s0, s3, s0 -; GCN-NEXT: s_mov_b32 s5, s1 -; GCN-NEXT: v_mov_b32_e32 v0, s0 -; GCN-NEXT: buffer_store_dword v0, off, s[4:7], 0 +; GCN-NEXT: s_mov_b64 s[4:5], s[2:3] +; GCN-NEXT: s_and_b32 s4, s4, 0xffff +; GCN-NEXT: s_add_i32 s4, s5, s4 +; GCN-NEXT: s_mov_b32 s3, 0xf000 +; GCN-NEXT: s_mov_b32 s2, -1 +; GCN-NEXT: v_mov_b32_e32 v0, s4 +; GCN-NEXT: buffer_store_dword v0, off, s[0:3], 0 ; GCN-NEXT: s_endpgm %zext = zext i16 %a to i32 %res = add i32 %b, %zext diff --git a/llvm/test/CodeGen/ARM/llvm.sincospi.ll b/llvm/test/CodeGen/ARM/llvm.sincospi.ll new file mode 100644 index 0000000000000..91bf0aaf1806a --- /dev/null +++ b/llvm/test/CodeGen/ARM/llvm.sincospi.ll @@ -0,0 +1,249 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6 +; RUN: llc -mtriple=thumbv7-apple-ios7.0.0 < %s | FileCheck %s + +define { half, half } @test_sincospi_f16(half %a) #0 { +; CHECK-LABEL: test_sincospi_f16: +; CHECK: @ %bb.0: +; CHECK-NEXT: push {r4, lr} +; CHECK-NEXT: sub sp, #8 +; CHECK-NEXT: bl ___extendhfsf2 +; CHECK-NEXT: add r1, sp, #4 +; CHECK-NEXT: mov r2, sp +; CHECK-NEXT: bl ___sincospif +; CHECK-NEXT: ldr r0, [sp, #4] +; CHECK-NEXT: bl ___truncsfhf2 +; CHECK-NEXT: mov r4, r0 +; CHECK-NEXT: ldr r0, [sp] +; CHECK-NEXT: bl ___truncsfhf2 +; CHECK-NEXT: mov r1, r0 +; CHECK-NEXT: mov r0, r4 +; CHECK-NEXT: add sp, #8 +; CHECK-NEXT: pop {r4, pc} + %result = call { half, half } @llvm.sincospi.f16(half %a) + ret { half, half } %result +} + +define half @test_sincospi_f16_only_use_sin(half %a) #0 { +; CHECK-LABEL: test_sincospi_f16_only_use_sin: +; CHECK: @ %bb.0: +; CHECK-NEXT: str lr, [sp, #-4]! +; CHECK-NEXT: sub sp, #8 +; CHECK-NEXT: bl ___extendhfsf2 +; CHECK-NEXT: add r1, sp, #4 +; CHECK-NEXT: mov r2, sp +; CHECK-NEXT: bl ___sincospif +; CHECK-NEXT: ldr r0, [sp, #4] +; CHECK-NEXT: bl ___truncsfhf2 +; CHECK-NEXT: add sp, #8 +; CHECK-NEXT: ldr lr, [sp], #4 +; CHECK-NEXT: bx lr + %result = call { half, half } @llvm.sincospi.f16(half %a) + %result.0 = extractvalue { half, half } %result, 0 + ret half %result.0 +} + +define half @test_sincospi_f16_only_use_cos(half %a) #0 { +; CHECK-LABEL: test_sincospi_f16_only_use_cos: +; CHECK: @ %bb.0: +; CHECK-NEXT: str lr, [sp, #-4]! 
+; CHECK-NEXT: sub sp, #8 +; CHECK-NEXT: bl ___extendhfsf2 +; CHECK-NEXT: add r1, sp, #4 +; CHECK-NEXT: mov r2, sp +; CHECK-NEXT: bl ___sincospif +; CHECK-NEXT: ldr r0, [sp] +; CHECK-NEXT: bl ___truncsfhf2 +; CHECK-NEXT: add sp, #8 +; CHECK-NEXT: ldr lr, [sp], #4 +; CHECK-NEXT: bx lr + %result = call { half, half } @llvm.sincospi.f16(half %a) + %result.1 = extractvalue { half, half } %result, 1 + ret half %result.1 +} + +define { <2 x half>, <2 x half> } @test_sincospi_v2f16(<2 x half> %a) #0 { +; CHECK-LABEL: test_sincospi_v2f16: +; CHECK: @ %bb.0: +; CHECK-NEXT: push {r4, lr} +; CHECK-NEXT: vpush {d8} +; CHECK-NEXT: sub sp, #24 +; CHECK-NEXT: mov r4, r0 +; CHECK-NEXT: mov r0, r1 +; CHECK-NEXT: bl ___extendhfsf2 +; CHECK-NEXT: add r1, sp, #12 +; CHECK-NEXT: add r2, sp, #8 +; CHECK-NEXT: bl ___sincospif +; CHECK-NEXT: mov r0, r4 +; CHECK-NEXT: bl ___extendhfsf2 +; CHECK-NEXT: add r1, sp, #4 +; CHECK-NEXT: mov r2, sp +; CHECK-NEXT: bl ___sincospif +; CHECK-NEXT: ldr r0, [sp, #12] +; CHECK-NEXT: bl ___truncsfhf2 +; CHECK-NEXT: ldr r1, [sp, #4] +; CHECK-NEXT: strh.w r0, [sp, #22] +; CHECK-NEXT: mov r0, r1 +; CHECK-NEXT: bl ___truncsfhf2 +; CHECK-NEXT: strh.w r0, [sp, #20] +; CHECK-NEXT: add r0, sp, #20 +; CHECK-NEXT: vld1.32 {d8[0]}, [r0:32] +; CHECK-NEXT: ldr r0, [sp, #8] +; CHECK-NEXT: bl ___truncsfhf2 +; CHECK-NEXT: ldr r1, [sp] +; CHECK-NEXT: strh.w r0, [sp, #18] +; CHECK-NEXT: mov r0, r1 +; CHECK-NEXT: bl ___truncsfhf2 +; CHECK-NEXT: strh.w r0, [sp, #16] +; CHECK-NEXT: add r0, sp, #16 +; CHECK-NEXT: vmovl.u16 q9, d8 +; CHECK-NEXT: vld1.32 {d16[0]}, [r0:32] +; CHECK-NEXT: vmovl.u16 q8, d16 +; CHECK-NEXT: vmov.32 r0, d18[0] +; CHECK-NEXT: vmov.32 r1, d18[1] +; CHECK-NEXT: vmov.32 r2, d16[0] +; CHECK-NEXT: vmov.32 r3, d16[1] +; CHECK-NEXT: add sp, #24 +; CHECK-NEXT: vpop {d8} +; CHECK-NEXT: pop {r4, pc} + %result = call { <2 x half>, <2 x half> } @llvm.sincospi.v2f16(<2 x half> %a) + ret { <2 x half>, <2 x half> } %result +} + +define { float, float } @test_sincospi_f32(float %a) #0 { +; CHECK-LABEL: test_sincospi_f32: +; CHECK: @ %bb.0: +; CHECK-NEXT: str lr, [sp, #-4]! +; CHECK-NEXT: sub sp, #8 +; CHECK-NEXT: add r1, sp, #4 +; CHECK-NEXT: mov r2, sp +; CHECK-NEXT: bl ___sincospif +; CHECK-NEXT: ldrd r1, r0, [sp], #8 +; CHECK-NEXT: ldr lr, [sp], #4 +; CHECK-NEXT: bx lr + %result = call { float, float } @llvm.sincospi.f32(float %a) + ret { float, float } %result +} + +define { <2 x float>, <2 x float> } @test_sincospi_v2f32(<2 x float> %a) #0 { +; CHECK-LABEL: test_sincospi_v2f32: +; CHECK: @ %bb.0: +; CHECK-NEXT: str lr, [sp, #-4]! 
+; CHECK-NEXT: vpush {d8} +; CHECK-NEXT: sub sp, #16 +; CHECK-NEXT: vmov d8, r0, r1 +; CHECK-NEXT: add r1, sp, #4 +; CHECK-NEXT: mov r2, sp +; CHECK-NEXT: vmov r0, s17 +; CHECK-NEXT: bl ___sincospif +; CHECK-NEXT: vmov r0, s16 +; CHECK-NEXT: add r1, sp, #12 +; CHECK-NEXT: add r2, sp, #8 +; CHECK-NEXT: bl ___sincospif +; CHECK-NEXT: vldr s1, [sp, #4] +; CHECK-NEXT: vldr s3, [sp] +; CHECK-NEXT: vldr s0, [sp, #12] +; CHECK-NEXT: vldr s2, [sp, #8] +; CHECK-NEXT: vmov r0, r1, d0 +; CHECK-NEXT: vmov r2, r3, d1 +; CHECK-NEXT: add sp, #16 +; CHECK-NEXT: vpop {d8} +; CHECK-NEXT: ldr lr, [sp], #4 +; CHECK-NEXT: bx lr + %result = call { <2 x float>, <2 x float> } @llvm.sincospi.v2f32(<2 x float> %a) + ret { <2 x float>, <2 x float> } %result +} + +define { <3 x float>, <3 x float> } @test_sincospi_v3f32(<3 x float> %a) #0 { +; CHECK-LABEL: test_sincospi_v3f32: +; CHECK: @ %bb.0: +; CHECK-NEXT: push {r4, r5, r6, r7, lr} +; CHECK-NEXT: sub sp, #16 +; CHECK-NEXT: mov r6, r2 +; CHECK-NEXT: mov r7, r1 +; CHECK-NEXT: add r1, sp, #12 +; CHECK-NEXT: add r2, sp, #8 +; CHECK-NEXT: mov r4, r0 +; CHECK-NEXT: mov r0, r6 +; CHECK-NEXT: mov r5, r3 +; CHECK-NEXT: bl ___sincospif +; CHECK-NEXT: add r1, sp, #4 +; CHECK-NEXT: mov r2, sp +; CHECK-NEXT: mov r0, r7 +; CHECK-NEXT: bl ___sincospif +; CHECK-NEXT: ldr r0, [sp, #36] +; CHECK-NEXT: vmov d0, r7, r6 +; CHECK-NEXT: mov r1, r4 +; CHECK-NEXT: add.w r2, r4, #16 +; CHECK-NEXT: vmov d1, r5, r0 +; CHECK-NEXT: vmov r0, s2 +; CHECK-NEXT: vldr s1, [sp, #8] +; CHECK-NEXT: vldr s3, [sp, #12] +; CHECK-NEXT: vldr s2, [sp, #4] +; CHECK-NEXT: vldr s0, [sp] +; CHECK-NEXT: vst1.32 {d1}, [r1:64]! +; CHECK-NEXT: vst1.32 {d0}, [r2:64]! +; CHECK-NEXT: bl ___sincospif +; CHECK-NEXT: add sp, #16 +; CHECK-NEXT: pop {r4, r5, r6, r7, pc} + %result = call { <3 x float>, <3 x float> } @llvm.sincospi.v3f32(<3 x float> %a) + ret { <3 x float>, <3 x float> } %result +} + +define { double, double } @test_sincospi_f64(double %a) #0 { +; CHECK-LABEL: test_sincospi_f64: +; CHECK: @ %bb.0: +; CHECK-NEXT: push {r4, r7, lr} +; CHECK-NEXT: add r7, sp, #4 +; CHECK-NEXT: sub sp, #20 +; CHECK-NEXT: mov r4, sp +; CHECK-NEXT: bfc r4, #0, #3 +; CHECK-NEXT: mov sp, r4 +; CHECK-NEXT: add r2, sp, #8 +; CHECK-NEXT: mov r3, sp +; CHECK-NEXT: bl ___sincospi +; CHECK-NEXT: subs r4, r7, #4 +; CHECK-NEXT: ldrd r0, r1, [sp, #8] +; CHECK-NEXT: ldrd r2, r3, [sp] +; CHECK-NEXT: mov sp, r4 +; CHECK-NEXT: pop {r4, r7, pc} + %result = call { double, double } @llvm.sincospi.f64(double %a) + ret { double, double } %result +} + +define { <2 x double>, <2 x double> } @test_sincospi_v2f64(<2 x double> %a) #0 { +; CHECK-LABEL: test_sincospi_v2f64: +; CHECK: @ %bb.0: +; CHECK-NEXT: push.w {r4, r5, r6, r7, r8, lr} +; CHECK-NEXT: add r7, sp, #16 +; CHECK-NEXT: sub sp, #32 +; CHECK-NEXT: mov r4, sp +; CHECK-NEXT: bfc r4, #0, #3 +; CHECK-NEXT: mov sp, r4 +; CHECK-NEXT: mov r6, r1 +; CHECK-NEXT: ldr r1, [r7, #8] +; CHECK-NEXT: mov r5, r3 +; CHECK-NEXT: mov r8, r2 +; CHECK-NEXT: add r2, sp, #24 +; CHECK-NEXT: add r3, sp, #16 +; CHECK-NEXT: mov r4, r0 +; CHECK-NEXT: mov r0, r5 +; CHECK-NEXT: bl ___sincospi +; CHECK-NEXT: add r2, sp, #8 +; CHECK-NEXT: mov r3, sp +; CHECK-NEXT: mov r0, r6 +; CHECK-NEXT: mov r1, r8 +; CHECK-NEXT: bl ___sincospi +; CHECK-NEXT: vldr d19, [sp, #24] +; CHECK-NEXT: vldr d18, [sp, #8] +; CHECK-NEXT: vldr d17, [sp, #16] +; CHECK-NEXT: vldr d16, [sp] +; CHECK-NEXT: vst1.32 {d18, d19}, [r4]! 
+; CHECK-NEXT: vst1.32 {d16, d17}, [r4] +; CHECK-NEXT: sub.w r4, r7, #16 +; CHECK-NEXT: mov sp, r4 +; CHECK-NEXT: pop.w {r4, r5, r6, r7, r8, pc} + %result = call { <2 x double>, <2 x double> } @llvm.sincospi.v2f64(<2 x double> %a) + ret { <2 x double>, <2 x double> } %result +} + +attributes #0 = { nounwind } diff --git a/llvm/test/CodeGen/MLRegAlloc/dev-mode-extra-features-logging.ll b/llvm/test/CodeGen/MLRegAlloc/dev-mode-extra-features-logging.ll deleted file mode 100644 index 9dd402d13b8e0..0000000000000 --- a/llvm/test/CodeGen/MLRegAlloc/dev-mode-extra-features-logging.ll +++ /dev/null @@ -1,48 +0,0 @@ -; REQUIRES: have_tflite -; REQUIRES: x86_64-linux -; -; Check that we log the currently in development features correctly with both the default -; case and with a learned policy. -; -; RUN: llc -o /dev/null -mtriple=x86_64-linux-unknown -regalloc=greedy \ -; RUN: -regalloc-enable-advisor=development \ -; RUN: -regalloc-training-log=%t1 \ -; RUN: -regalloc-enable-development-features < %S/Inputs/input.ll -; RUN: %python %S/../../../lib/Analysis/models/log_reader.py %t1 > %t1.readable -; RUN: FileCheck --input-file %t1.readable %s - -; RUN: rm -rf %t && mkdir %t -; RUN: %python %S/../../../lib/Analysis/models/gen-regalloc-eviction-test-model.py %t_savedmodel -; RUN: %python %S/../../../lib/Analysis/models/saved-model-to-tflite.py %t_savedmodel %t -; RUN: llc -o /dev/null -mtriple=x86_64-linux-unknown -regalloc=greedy \ -; RUN: -regalloc-enable-advisor=development \ -; RUN: -regalloc-training-log=%t2 -regalloc-model=%t \ -; RUN: -regalloc-enable-development-features < %S/Inputs/input.ll -; RUN: %python %S/../../../lib/Analysis/models/log_reader.py %t2 > %t2.readable -; RUN: FileCheck --input-file %t2.readable %s - -; CHECK-NOT: nan -; Check the first five opcodes in the first eviction problem -; Also, the first eviction problem is significantly less than 300 instructions. Check -; that there is a zero value. -; Note: we're regex-ing some of the opcodes to avoid test flakyness. -; CHECK: instructions: 20,{{([0-9]{4})}},{{([0-9]{4})}},{{([0-9]{4})}},{{.*}},0, -; Only the candidate virtreg and the 10th LR are included in this problem. Make -; sure the other LRs have values of zero. There are 2700 0s followed by some 1s. -; There's a limit to how many repetitions can be matched. -; CHECK: instructions_mapping: {{(((0,){27}){100})}} -; CHECK-SAME: 1 -; Indexing 300 back from where the candidate vr actual resides due to the fact -; that not all the values between the 10th LR and the candidate are zero. -; CHECK-SAME-COUNT-6600: 0, -; CHECK-SAME: 1 -; Ensure that we can still go through the mapping matrices for the rest of the -; eviction problems to make sure we haven't hit the end of the matrix above. -; There are a total of 23 eviction problems with this test. -; CHECK-LABEL: observation: 16 -; Make sure that we're exporting the mbb_frequencies. Don't actually check -; values due to all values being floating point/liable to change very easily. -; CHECK: mbb_frequencies: -; Make sure that we have the mbb_mapping feature, and that the first couple -; of values are correct. 
-; CHECK: mbb_mapping: 0,0,0,0,1,1,1 diff --git a/llvm/test/CodeGen/PowerPC/llvm.sincos.ll b/llvm/test/CodeGen/PowerPC/llvm.sincos.ll index aaf81ff814488..5b4e91c449522 100644 --- a/llvm/test/CodeGen/PowerPC/llvm.sincos.ll +++ b/llvm/test/CodeGen/PowerPC/llvm.sincos.ll @@ -26,30 +26,6 @@ define { ppc_fp128, ppc_fp128 } @test_sincos_ppcf128(ppc_fp128 %a) { ret { ppc_fp128, ppc_fp128 } %result } -define { ppc_fp128, ppc_fp128 } @test_sincospi_ppcf128(ppc_fp128 %a) { -; CHECK-LABEL: test_sincospi_ppcf128: -; CHECK: # %bb.0: -; CHECK-NEXT: mflr r0 -; CHECK-NEXT: stdu r1, -64(r1) -; CHECK-NEXT: std r0, 80(r1) -; CHECK-NEXT: .cfi_def_cfa_offset 64 -; CHECK-NEXT: .cfi_offset lr, 16 -; CHECK-NEXT: addi r5, r1, 48 -; CHECK-NEXT: addi r6, r1, 32 -; CHECK-NEXT: bl sincospil -; CHECK-NEXT: nop -; CHECK-NEXT: lfd f1, 48(r1) -; CHECK-NEXT: lfd f2, 56(r1) -; CHECK-NEXT: lfd f3, 32(r1) -; CHECK-NEXT: lfd f4, 40(r1) -; CHECK-NEXT: addi r1, r1, 64 -; CHECK-NEXT: ld r0, 16(r1) -; CHECK-NEXT: mtlr r0 -; CHECK-NEXT: blr - %result = call { ppc_fp128, ppc_fp128 } @llvm.sincospi.ppcf128(ppc_fp128 %a) - ret { ppc_fp128, ppc_fp128 } %result -} - ; FIXME: This could be made a tail call with the default expansion of llvm.sincos. define void @test_sincos_ppcf128_void_tail_call(ppc_fp128 %a, ptr noalias %out_sin, ptr noalias %out_cos) { ; CHECK-LABEL: test_sincos_ppcf128_void_tail_call: @@ -73,29 +49,6 @@ define void @test_sincos_ppcf128_void_tail_call(ppc_fp128 %a, ptr noalias %out_s ret void } -; FIXME: This could be made a tail call with the default expansion of llvm.sincospi. -define void @test_sincospi_ppcf128_void_tail_call(ppc_fp128 %a, ptr noalias %out_sin, ptr noalias %out_cos) { -; CHECK-LABEL: test_sincospi_ppcf128_void_tail_call: -; CHECK: # %bb.0: -; CHECK-NEXT: mflr r0 -; CHECK-NEXT: stdu r1, -32(r1) -; CHECK-NEXT: std r0, 48(r1) -; CHECK-NEXT: .cfi_def_cfa_offset 32 -; CHECK-NEXT: .cfi_offset lr, 16 -; CHECK-NEXT: bl sincospil -; CHECK-NEXT: nop -; CHECK-NEXT: addi r1, r1, 32 -; CHECK-NEXT: ld r0, 16(r1) -; CHECK-NEXT: mtlr r0 -; CHECK-NEXT: blr - %result = tail call { ppc_fp128, ppc_fp128 } @llvm.sincospi.ppcf128(ppc_fp128 %a) - %result.0 = extractvalue { ppc_fp128, ppc_fp128 } %result, 0 - %result.1 = extractvalue { ppc_fp128, ppc_fp128 } %result, 1 - store ppc_fp128 %result.0, ptr %out_sin, align 16 - store ppc_fp128 %result.1, ptr %out_cos, align 16 - ret void -} - ; NOTE: This would need a struct-return library call for llvm.sincos to become a tail call. define { ppc_fp128, ppc_fp128 } @test_sincos_ppcf128_tail_call(ppc_fp128 %a) { ; CHECK-LABEL: test_sincos_ppcf128_tail_call: @@ -120,28 +73,3 @@ define { ppc_fp128, ppc_fp128 } @test_sincos_ppcf128_tail_call(ppc_fp128 %a) { %result = tail call { ppc_fp128, ppc_fp128 } @llvm.sincos.ppcf128(ppc_fp128 %a) ret { ppc_fp128, ppc_fp128 } %result } - -; NOTE: This would need a struct-return library call for llvm.sincospi to become a tail call. 
-define { ppc_fp128, ppc_fp128 } @test_sincospi_ppcf128_tail_call(ppc_fp128 %a) { -; CHECK-LABEL: test_sincospi_ppcf128_tail_call: -; CHECK: # %bb.0: -; CHECK-NEXT: mflr r0 -; CHECK-NEXT: stdu r1, -64(r1) -; CHECK-NEXT: std r0, 80(r1) -; CHECK-NEXT: .cfi_def_cfa_offset 64 -; CHECK-NEXT: .cfi_offset lr, 16 -; CHECK-NEXT: addi r5, r1, 48 -; CHECK-NEXT: addi r6, r1, 32 -; CHECK-NEXT: bl sincospil -; CHECK-NEXT: nop -; CHECK-NEXT: lfd f1, 48(r1) -; CHECK-NEXT: lfd f2, 56(r1) -; CHECK-NEXT: lfd f3, 32(r1) -; CHECK-NEXT: lfd f4, 40(r1) -; CHECK-NEXT: addi r1, r1, 64 -; CHECK-NEXT: ld r0, 16(r1) -; CHECK-NEXT: mtlr r0 -; CHECK-NEXT: blr - %result = tail call { ppc_fp128, ppc_fp128 } @llvm.sincospi.ppcf128(ppc_fp128 %a) - ret { ppc_fp128, ppc_fp128 } %result -} diff --git a/llvm/test/CodeGen/PowerPC/llvm.sincospi.ll b/llvm/test/CodeGen/PowerPC/llvm.sincospi.ll new file mode 100644 index 0000000000000..75e7559386f16 --- /dev/null +++ b/llvm/test/CodeGen/PowerPC/llvm.sincospi.ll @@ -0,0 +1,21 @@ +; RUN: not llc -mtriple=powerpc64le-gnu-linux -filetype=null %s 2>&1 | FileCheck %s + +; CHECK: error: no libcall available for fsincospi +define { half, half } @test_sincospi_f16(half %a) #0 { + %result = call { half, half } @llvm.sincospi.f16(half %a) + ret { half, half } %result +} + +; CHECK: error: no libcall available for fsincospi +define { float, float } @test_sincospi_f32(float %a) #0 { + %result = call { float, float } @llvm.sincospi.f32(float %a) + ret { float, float } %result +} + +; CHECK: error: no libcall available for fsincospi +define { double, double } @test_sincospi_f64(double %a) #0 { + %result = call { double, double } @llvm.sincospi.f64(double %a) + ret { double, double } %result +} + +attributes #0 = { nounwind } diff --git a/llvm/test/CodeGen/PowerPC/llvm.sincospi.ppcfp128.ll b/llvm/test/CodeGen/PowerPC/llvm.sincospi.ppcfp128.ll new file mode 100644 index 0000000000000..bc656bb785e9e --- /dev/null +++ b/llvm/test/CodeGen/PowerPC/llvm.sincospi.ppcfp128.ll @@ -0,0 +1,25 @@ +; XFAIL: * +; FIXME: asserts +; RUN: llc -mcpu=pwr9 -mtriple=powerpc64le-gnu-linux -filetype=null \ +; RUN: -ppc-vsr-nums-as-vr -ppc-asm-full-reg-names %s + +define { ppc_fp128, ppc_fp128 } @test_sincospi_ppcf128(ppc_fp128 %a) { + %result = call { ppc_fp128, ppc_fp128 } @llvm.sincospi.ppcf128(ppc_fp128 %a) + ret { ppc_fp128, ppc_fp128 } %result +} + +; FIXME: This could be made a tail call with the default expansion of llvm.sincospi. +define void @test_sincospi_ppcf128_void_tail_call(ppc_fp128 %a, ptr noalias %out_sin, ptr noalias %out_cos) { + %result = tail call { ppc_fp128, ppc_fp128 } @llvm.sincospi.ppcf128(ppc_fp128 %a) + %result.0 = extractvalue { ppc_fp128, ppc_fp128 } %result, 0 + %result.1 = extractvalue { ppc_fp128, ppc_fp128 } %result, 1 + store ppc_fp128 %result.0, ptr %out_sin, align 16 + store ppc_fp128 %result.1, ptr %out_cos, align 16 + ret void +} + +; NOTE: This would need a struct-return library call for llvm.sincospi to become a tail call. 
+define { ppc_fp128, ppc_fp128 } @test_sincospi_ppcf128_tail_call(ppc_fp128 %a) { + %result = tail call { ppc_fp128, ppc_fp128 } @llvm.sincospi.ppcf128(ppc_fp128 %a) + ret { ppc_fp128, ppc_fp128 } %result +} diff --git a/llvm/test/CodeGen/PowerPC/milicode32.ll b/llvm/test/CodeGen/PowerPC/milicode32.ll index 78d036202fe4e..ddadd01a748f1 100644 --- a/llvm/test/CodeGen/PowerPC/milicode32.ll +++ b/llvm/test/CodeGen/PowerPC/milicode32.ll @@ -69,3 +69,59 @@ entry: } declare i32 @strlen(ptr noundef) nounwind + +define ptr @test_memmove(ptr noundef %destination, ptr noundef %source, i32 noundef %num) #0 { +; CHECK-AIX-32-P9-LABEL: test_memmove: +; CHECK-AIX-32-P9: # %bb.0: # %entry +; CHECK-AIX-32-P9-NEXT: mflr r0 +; CHECK-AIX-32-P9-NEXT: stwu r1, -80(r1) +; CHECK-AIX-32-P9-NEXT: stw r0, 88(r1) +; CHECK-AIX-32-P9-NEXT: stw r31, 76(r1) # 4-byte Folded Spill +; CHECK-AIX-32-P9-NEXT: mr r31, r3 +; CHECK-AIX-32-P9-NEXT: stw r3, 72(r1) +; CHECK-AIX-32-P9-NEXT: stw r4, 68(r1) +; CHECK-AIX-32-P9-NEXT: stw r5, 64(r1) +; CHECK-AIX-32-P9-NEXT: bl .___memmove[PR] +; CHECK-AIX-32-P9-NEXT: nop +; CHECK-AIX-32-P9-NEXT: mr r3, r31 +; CHECK-AIX-32-P9-NEXT: lwz r31, 76(r1) # 4-byte Folded Reload +; CHECK-AIX-32-P9-NEXT: addi r1, r1, 80 +; CHECK-AIX-32-P9-NEXT: lwz r0, 8(r1) +; CHECK-AIX-32-P9-NEXT: mtlr r0 +; CHECK-AIX-32-P9-NEXT: blr +; +; CHECK-LINUX32-P9-LABEL: test_memmove: +; CHECK-LINUX32-P9: # %bb.0: # %entry +; CHECK-LINUX32-P9-NEXT: mflr r0 +; CHECK-LINUX32-P9-NEXT: stwu r1, -32(r1) +; CHECK-LINUX32-P9-NEXT: stw r0, 36(r1) +; CHECK-LINUX32-P9-NEXT: .cfi_def_cfa_offset 32 +; CHECK-LINUX32-P9-NEXT: .cfi_offset lr, 4 +; CHECK-LINUX32-P9-NEXT: .cfi_offset r30, -8 +; CHECK-LINUX32-P9-NEXT: stw r30, 24(r1) # 4-byte Folded Spill +; CHECK-LINUX32-P9-NEXT: mr r30, r3 +; CHECK-LINUX32-P9-NEXT: stw r3, 20(r1) +; CHECK-LINUX32-P9-NEXT: stw r4, 16(r1) +; CHECK-LINUX32-P9-NEXT: stw r5, 12(r1) +; CHECK-LINUX32-P9-NEXT: bl memmove +; CHECK-LINUX32-P9-NEXT: mr r3, r30 +; CHECK-LINUX32-P9-NEXT: lwz r30, 24(r1) # 4-byte Folded Reload +; CHECK-LINUX32-P9-NEXT: lwz r0, 36(r1) +; CHECK-LINUX32-P9-NEXT: addi r1, r1, 32 +; CHECK-LINUX32-P9-NEXT: mtlr r0 +; CHECK-LINUX32-P9-NEXT: blr +entry: + %destination.addr = alloca ptr, align 4 + %source.addr = alloca ptr, align 4 + %num.addr = alloca i32, align 4 + store ptr %destination, ptr %destination.addr, align 4 + store ptr %source, ptr %source.addr, align 4 + store i32 %num, ptr %num.addr, align 4 + %0 = load ptr, ptr %destination.addr, align 4 + %1 = load ptr, ptr %source.addr, align 4 + %2 = load i32, ptr %num.addr, align 4 + call void @llvm.memmove.p0.p0.i32(ptr align 1 %0, ptr align 1 %1, i32 %2, i1 false) + ret ptr %0 +} + +declare void @llvm.memmove.p0.p0.i32(ptr writeonly captures(none), ptr readonly captures(none), i32, i1 immarg) diff --git a/llvm/test/CodeGen/PowerPC/milicode64.ll b/llvm/test/CodeGen/PowerPC/milicode64.ll index 8b87529d9a6d8..2dbf4140a0fa4 100644 --- a/llvm/test/CodeGen/PowerPC/milicode64.ll +++ b/llvm/test/CodeGen/PowerPC/milicode64.ll @@ -100,3 +100,82 @@ entry: } declare i64 @strlen(ptr noundef) nounwind + +define ptr @test_memmove(ptr noundef %destination, ptr noundef %source, i64 noundef %num) #0 { +; CHECK-LE-P9-LABEL: test_memmove: +; CHECK-LE-P9: # %bb.0: # %entry +; CHECK-LE-P9-NEXT: mflr r0 +; CHECK-LE-P9-NEXT: .cfi_def_cfa_offset 80 +; CHECK-LE-P9-NEXT: .cfi_offset lr, 16 +; CHECK-LE-P9-NEXT: .cfi_offset r30, -16 +; CHECK-LE-P9-NEXT: std r30, -16(r1) # 8-byte Folded Spill +; CHECK-LE-P9-NEXT: stdu r1, -80(r1) +; CHECK-LE-P9-NEXT: std r0, 96(r1) 
+; CHECK-LE-P9-NEXT: mr r30, r3 +; CHECK-LE-P9-NEXT: std r3, 56(r1) +; CHECK-LE-P9-NEXT: std r4, 48(r1) +; CHECK-LE-P9-NEXT: std r5, 40(r1) +; CHECK-LE-P9-NEXT: bl memmove +; CHECK-LE-P9-NEXT: nop +; CHECK-LE-P9-NEXT: mr r3, r30 +; CHECK-LE-P9-NEXT: addi r1, r1, 80 +; CHECK-LE-P9-NEXT: ld r0, 16(r1) +; CHECK-LE-P9-NEXT: ld r30, -16(r1) # 8-byte Folded Reload +; CHECK-LE-P9-NEXT: mtlr r0 +; CHECK-LE-P9-NEXT: blr +; +; CHECK-BE-P9-LABEL: test_memmove: +; CHECK-BE-P9: # %bb.0: # %entry +; CHECK-BE-P9-NEXT: mflr r0 +; CHECK-BE-P9-NEXT: stdu r1, -160(r1) +; CHECK-BE-P9-NEXT: std r0, 176(r1) +; CHECK-BE-P9-NEXT: .cfi_def_cfa_offset 160 +; CHECK-BE-P9-NEXT: .cfi_offset lr, 16 +; CHECK-BE-P9-NEXT: .cfi_offset r30, -16 +; CHECK-BE-P9-NEXT: std r30, 144(r1) # 8-byte Folded Spill +; CHECK-BE-P9-NEXT: mr r30, r3 +; CHECK-BE-P9-NEXT: std r3, 136(r1) +; CHECK-BE-P9-NEXT: std r4, 128(r1) +; CHECK-BE-P9-NEXT: std r5, 120(r1) +; CHECK-BE-P9-NEXT: bl memmove +; CHECK-BE-P9-NEXT: nop +; CHECK-BE-P9-NEXT: mr r3, r30 +; CHECK-BE-P9-NEXT: ld r30, 144(r1) # 8-byte Folded Reload +; CHECK-BE-P9-NEXT: addi r1, r1, 160 +; CHECK-BE-P9-NEXT: ld r0, 16(r1) +; CHECK-BE-P9-NEXT: mtlr r0 +; CHECK-BE-P9-NEXT: blr +; +; CHECK-AIX-64-P9-LABEL: test_memmove: +; CHECK-AIX-64-P9: # %bb.0: # %entry +; CHECK-AIX-64-P9-NEXT: mflr r0 +; CHECK-AIX-64-P9-NEXT: stdu r1, -144(r1) +; CHECK-AIX-64-P9-NEXT: std r0, 160(r1) +; CHECK-AIX-64-P9-NEXT: std r31, 136(r1) # 8-byte Folded Spill +; CHECK-AIX-64-P9-NEXT: mr r31, r3 +; CHECK-AIX-64-P9-NEXT: std r3, 128(r1) +; CHECK-AIX-64-P9-NEXT: std r4, 120(r1) +; CHECK-AIX-64-P9-NEXT: std r5, 112(r1) +; CHECK-AIX-64-P9-NEXT: bl .___memmove64[PR] +; CHECK-AIX-64-P9-NEXT: nop +; CHECK-AIX-64-P9-NEXT: mr r3, r31 +; CHECK-AIX-64-P9-NEXT: ld r31, 136(r1) # 8-byte Folded Reload +; CHECK-AIX-64-P9-NEXT: addi r1, r1, 144 +; CHECK-AIX-64-P9-NEXT: ld r0, 16(r1) +; CHECK-AIX-64-P9-NEXT: mtlr r0 +; CHECK-AIX-64-P9-NEXT: blr +entry: + %destination.addr = alloca ptr, align 8 + %source.addr = alloca ptr, align 8 + %num.addr = alloca i64, align 8 + store ptr %destination, ptr %destination.addr, align 8 + store ptr %source, ptr %source.addr, align 8 + store i64 %num, ptr %num.addr, align 8 + %0 = load ptr, ptr %destination.addr, align 8 + %1 = load ptr, ptr %source.addr, align 8 + %2 = load i64, ptr %num.addr, align 8 + call void @llvm.memmove.p0.p0.i64(ptr align 1 %0, ptr align 1 %1, i64 %2, i1 false) + ret ptr %0 +} + +declare void @llvm.memmove.p0.p0.i64(ptr writeonly captures(none), ptr readonly captures(none), i64, i1 immarg) diff --git a/llvm/test/CodeGen/RISCV/overflow-intrinsics.ll b/llvm/test/CodeGen/RISCV/overflow-intrinsics.ll index ba6769b2aa3e1..0306bb18c2aed 100644 --- a/llvm/test/CodeGen/RISCV/overflow-intrinsics.ll +++ b/llvm/test/CodeGen/RISCV/overflow-intrinsics.ll @@ -232,7 +232,7 @@ define i64 @uaddo3_math_overflow_used(i64 %a, i64 %b, ptr %res) nounwind ssp { ret i64 %Q } -; TODO? CGP sinks the compare before we have a chance to form the overflow intrinsic. +; Ensure CGP doesn't sink the compare before we have a chance to form the overflow intrinsic.
define i64 @uaddo4(i64 %a, i64 %b, i1 %c) nounwind ssp { ; RV32-LABEL: uaddo4: @@ -1076,41 +1076,37 @@ define i1 @usubo_ult_cmp_dominates_i64(i64 %x, i64 %y, ptr %p, i1 %cond) { ; RV32-NEXT: .cfi_offset s4, -24 ; RV32-NEXT: .cfi_offset s5, -28 ; RV32-NEXT: .cfi_offset s6, -32 -; RV32-NEXT: mv s5, a5 -; RV32-NEXT: mv s3, a1 +; RV32-NEXT: mv s1, a5 +; RV32-NEXT: mv s4, a1 ; RV32-NEXT: andi a1, a5, 1 -; RV32-NEXT: beqz a1, .LBB32_8 +; RV32-NEXT: beqz a1, .LBB32_6 ; RV32-NEXT: # %bb.1: # %t ; RV32-NEXT: mv s0, a4 -; RV32-NEXT: mv s2, a3 -; RV32-NEXT: mv s1, a2 -; RV32-NEXT: mv s4, a0 -; RV32-NEXT: beq s3, a3, .LBB32_3 +; RV32-NEXT: mv s3, a3 +; RV32-NEXT: mv s2, a2 +; RV32-NEXT: mv s5, a0 +; RV32-NEXT: beq s4, a3, .LBB32_3 ; RV32-NEXT: # %bb.2: # %t -; RV32-NEXT: sltu s6, s3, s2 +; RV32-NEXT: sltu s6, s4, s3 ; RV32-NEXT: j .LBB32_4 ; RV32-NEXT: .LBB32_3: -; RV32-NEXT: sltu s6, s4, s1 +; RV32-NEXT: sltu s6, s5, s2 ; RV32-NEXT: .LBB32_4: # %t ; RV32-NEXT: mv a0, s6 ; RV32-NEXT: call call -; RV32-NEXT: beqz s6, .LBB32_8 +; RV32-NEXT: beqz s6, .LBB32_6 ; RV32-NEXT: # %bb.5: # %end -; RV32-NEXT: sltu a1, s4, s1 -; RV32-NEXT: mv a0, a1 -; RV32-NEXT: beq s3, s2, .LBB32_7 -; RV32-NEXT: # %bb.6: # %end -; RV32-NEXT: sltu a0, s3, s2 -; RV32-NEXT: .LBB32_7: # %end -; RV32-NEXT: sub a2, s3, s2 -; RV32-NEXT: sub a3, s4, s1 -; RV32-NEXT: sub a2, a2, a1 -; RV32-NEXT: sw a3, 0(s0) -; RV32-NEXT: sw a2, 4(s0) -; RV32-NEXT: j .LBB32_9 -; RV32-NEXT: .LBB32_8: # %f -; RV32-NEXT: mv a0, s5 -; RV32-NEXT: .LBB32_9: # %f +; RV32-NEXT: sltu a0, s5, s2 +; RV32-NEXT: sub a1, s4, s3 +; RV32-NEXT: sub a2, s5, s2 +; RV32-NEXT: sub a1, a1, a0 +; RV32-NEXT: sw a2, 0(s0) +; RV32-NEXT: sw a1, 4(s0) +; RV32-NEXT: mv a0, s6 +; RV32-NEXT: j .LBB32_7 +; RV32-NEXT: .LBB32_6: # %f +; RV32-NEXT: mv a0, s1 +; RV32-NEXT: .LBB32_7: # %f ; RV32-NEXT: lw ra, 28(sp) # 4-byte Folded Reload ; RV32-NEXT: lw s0, 24(sp) # 4-byte Folded Reload ; RV32-NEXT: lw s1, 20(sp) # 4-byte Folded Reload diff --git a/llvm/test/CodeGen/SPIRV/opencl/unpackhalf2x16-error.ll b/llvm/test/CodeGen/SPIRV/opencl/unpackhalf2x16-error.ll new file mode 100644 index 0000000000000..1d3ba2a38e55b --- /dev/null +++ b/llvm/test/CodeGen/SPIRV/opencl/unpackhalf2x16-error.ll @@ -0,0 +1,11 @@ +; RUN: not llc -verify-machineinstrs -O0 -mtriple=spirv64-unknown-unknown %s -o /dev/null 2>&1 | FileCheck %s +; RUN: not llc -verify-machineinstrs -O0 -mtriple=spirv32-unknown-unknown %s -o /dev/null 2>&1 | FileCheck %s + +; CHECK: LLVM ERROR: %5:vfid(<2 x s32>) = nnan ninf nsz arcp afn reassoc G_INTRINSIC intrinsic(@llvm.spv.unpackhalf2x16), %0:iid(s32) is only supported with the GLSL extended instruction set.
+ +define hidden spir_func noundef nofpclass(nan inf) float @_Z9test_funcj(i32 noundef %0) local_unnamed_addr #0 { + %2 = tail call reassoc nnan ninf nsz arcp afn <2 x float> @llvm.spv.unpackhalf2x16.v2f32(i32 %0) + %3 = extractelement <2 x float> %2, i64 0 + ret float %3 +} + diff --git a/llvm/test/CodeGen/SPIRV/spirv_param_decorations_quals.ll b/llvm/test/CodeGen/SPIRV/spirv_param_decorations_quals.ll index 260394b658348..fb550bb01a3a2 100644 --- a/llvm/test/CodeGen/SPIRV/spirv_param_decorations_quals.ll +++ b/llvm/test/CodeGen/SPIRV/spirv_param_decorations_quals.ll @@ -7,9 +7,11 @@ entry: ; CHECK-SPIRV: OpDecorate %[[#PId:]] Volatile ; CHECK-SPIRV: OpDecorate %[[#PId]] FuncParamAttr NoAlias +; CHECK-SPIRV: OpDecorate %[[#PId]] FuncParamAttr NoWrite ; CHECK-SPIRV: %[[#PId]] = OpFunctionParameter %[[#]] !7 = !{!"volatile"} !8 = !{i32 38, i32 4} ; FuncParamAttr NoAlias -!9 = !{!8} +!11 = !{i32 38, i32 6} ; FuncParamAttr NoWrite +!9 = !{!8, !11} !10 = !{!9} diff --git a/llvm/test/CodeGen/X86/2012-01-10-UndefExceptionEdge.ll b/llvm/test/CodeGen/X86/2012-01-10-UndefExceptionEdge.ll index 1962ddebc2115..f2b4c49b1dbcd 100644 --- a/llvm/test/CodeGen/X86/2012-01-10-UndefExceptionEdge.ll +++ b/llvm/test/CodeGen/X86/2012-01-10-UndefExceptionEdge.ll @@ -36,10 +36,10 @@ define void @f(ptr nocapture %arg, ptr nocapture %arg1, ptr nocapture %arg2, ptr ; CHECK-NEXT: xorl %eax, %eax ; CHECK-NEXT: xorl %edi, %edi ; CHECK-NEXT: testb %al, %al -; CHECK-NEXT: Ltmp0: +; CHECK-NEXT: Ltmp0: ## EH_LABEL ; CHECK-NEXT: ## implicit-def: $ebx ; CHECK-NEXT: calll __Znam -; CHECK-NEXT: Ltmp1: +; CHECK-NEXT: Ltmp1: ## EH_LABEL ; CHECK-NEXT: ## %bb.1: ## %bb11 ; CHECK-NEXT: movl %eax, %esi ; CHECK-NEXT: movb $1, %al @@ -58,13 +58,13 @@ define void @f(ptr nocapture %arg, ptr nocapture %arg1, ptr nocapture %arg2, ptr ; CHECK-NEXT: jne LBB0_9 ; CHECK-NEXT: ## %bb.10: ## %bb41 ; CHECK-NEXT: ## in Loop: Header=BB0_8 Depth=1 -; CHECK-NEXT: Ltmp2: +; CHECK-NEXT: Ltmp2: ## EH_LABEL ; CHECK-NEXT: xorl %eax, %eax ; CHECK-NEXT: movl %eax, {{[0-9]+}}(%esp) ; CHECK-NEXT: movl %eax, {{[0-9]+}}(%esp) ; CHECK-NEXT: movl %esi, (%esp) ; CHECK-NEXT: calll _Pjii -; CHECK-NEXT: Ltmp3: +; CHECK-NEXT: Ltmp3: ## EH_LABEL ; CHECK-NEXT: ## %bb.11: ## %bb42 ; CHECK-NEXT: ## in Loop: Header=BB0_8 Depth=1 ; CHECK-NEXT: xorl %eax, %eax @@ -126,20 +126,20 @@ define void @f(ptr nocapture %arg, ptr nocapture %arg1, ptr nocapture %arg2, ptr ; CHECK-NEXT: decl {{[-0-9]+}}(%e{{[sb]}}p) ## 4-byte Folded Spill ; CHECK-NEXT: jmp LBB0_8 ; CHECK-NEXT: LBB0_18: ## %bb43 -; CHECK-NEXT: Ltmp5: +; CHECK-NEXT: Ltmp5: ## EH_LABEL ; CHECK-NEXT: movl %esi, %ebx ; CHECK-NEXT: calll _OnOverFlow -; CHECK-NEXT: Ltmp6: +; CHECK-NEXT: Ltmp6: ## EH_LABEL ; CHECK-NEXT: jmp LBB0_3 ; CHECK-NEXT: LBB0_2: ## %bb29 -; CHECK-NEXT: Ltmp7: +; CHECK-NEXT: Ltmp7: ## EH_LABEL ; CHECK-NEXT: movl %esi, %ebx ; CHECK-NEXT: calll _OnOverFlow -; CHECK-NEXT: Ltmp8: +; CHECK-NEXT: Ltmp8: ## EH_LABEL ; CHECK-NEXT: LBB0_3: ## %bb30 ; CHECK-NEXT: ud2 ; CHECK-NEXT: LBB0_4: ## %bb20.loopexit -; CHECK-NEXT: Ltmp4: +; CHECK-NEXT: Ltmp4: ## EH_LABEL ; CHECK-NEXT: LBB0_9: ; CHECK-NEXT: movl %esi, %ebx ; CHECK-NEXT: LBB0_6: ## %bb23 @@ -151,7 +151,7 @@ define void @f(ptr nocapture %arg, ptr nocapture %arg1, ptr nocapture %arg2, ptr ; CHECK-NEXT: popl %ebp ; CHECK-NEXT: retl ; CHECK-NEXT: LBB0_5: ## %bb20.loopexit.split-lp -; CHECK-NEXT: Ltmp9: +; CHECK-NEXT: Ltmp9: ## EH_LABEL ; CHECK-NEXT: jmp LBB0_6 ; CHECK-NEXT: Lfunc_end0: bb: diff --git a/llvm/test/CodeGen/X86/StackColoring-dbg-invariance.mir 
b/llvm/test/CodeGen/X86/StackColoring-dbg-invariance.mir index 348a2901ff6a4..24453066f2583 100644 --- a/llvm/test/CodeGen/X86/StackColoring-dbg-invariance.mir +++ b/llvm/test/CodeGen/X86/StackColoring-dbg-invariance.mir @@ -55,7 +55,7 @@ !9 = !DILocalVariable(name: "4", scope: !5, file: !1, line: 4, type: !10) !10 = !DIBasicType(name: "ty64", size: 64, encoding: DW_ATE_unsigned) !11 = !DILocation(line: 4, column: 1, scope: !5) - !12 = distinct !DISubprogram(name: "test_2", linkageName: "test_2", scope: null, file: !1, line: 1, type: !6, scopeLine: 1, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: !0, retainedNodes: !8) + !12 = distinct !DISubprogram(name: "test_2", linkageName: "test_2", scope: null, file: !1, line: 1, type: !6, scopeLine: 1, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: !0, retainedNodes: !7) ... --- diff --git a/llvm/test/CodeGen/X86/llvm.sincospi.ll b/llvm/test/CodeGen/X86/llvm.sincospi.ll new file mode 100644 index 0000000000000..5546c66deba30 --- /dev/null +++ b/llvm/test/CodeGen/X86/llvm.sincospi.ll @@ -0,0 +1,233 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6 +; RUN: llc -mtriple=x86_64-apple-macosx10.9 < %s | FileCheck %s + +define { half, half } @test_sincospi_f16(half %a) #0 { +; CHECK-LABEL: test_sincospi_f16: +; CHECK: ## %bb.0: +; CHECK-NEXT: subq $40, %rsp +; CHECK-NEXT: pextrw $0, %xmm0, %eax +; CHECK-NEXT: movzwl %ax, %edi +; CHECK-NEXT: callq ___extendhfsf2 +; CHECK-NEXT: leaq {{[0-9]+}}(%rsp), %rdi +; CHECK-NEXT: leaq {{[0-9]+}}(%rsp), %rsi +; CHECK-NEXT: callq ___sincospif +; CHECK-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero +; CHECK-NEXT: callq ___truncsfhf2 +; CHECK-NEXT: ## kill: def $ax killed $ax def $eax +; CHECK-NEXT: pinsrw $0, %eax, %xmm0 +; CHECK-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill +; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero +; CHECK-NEXT: callq ___truncsfhf2 +; CHECK-NEXT: ## kill: def $ax killed $ax def $eax +; CHECK-NEXT: pinsrw $0, %eax, %xmm1 +; CHECK-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload +; CHECK-NEXT: addq $40, %rsp +; CHECK-NEXT: retq + %result = call { half, half } @llvm.sincospi.f16(half %a) + ret { half, half } %result +} + +define half @test_sincospi_f16_only_use_sin(half %a) #0 { +; CHECK-LABEL: test_sincospi_f16_only_use_sin: +; CHECK: ## %bb.0: +; CHECK-NEXT: pushq %rax +; CHECK-NEXT: pextrw $0, %xmm0, %eax +; CHECK-NEXT: movzwl %ax, %edi +; CHECK-NEXT: callq ___extendhfsf2 +; CHECK-NEXT: movq %rsp, %rdi +; CHECK-NEXT: leaq {{[0-9]+}}(%rsp), %rsi +; CHECK-NEXT: callq ___sincospif +; CHECK-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero +; CHECK-NEXT: callq ___truncsfhf2 +; CHECK-NEXT: ## kill: def $ax killed $ax def $eax +; CHECK-NEXT: pinsrw $0, %eax, %xmm0 +; CHECK-NEXT: popq %rax +; CHECK-NEXT: retq + %result = call { half, half } @llvm.sincospi.f16(half %a) + %result.0 = extractvalue { half, half } %result, 0 + ret half %result.0 +} + +define half @test_sincospi_f16_only_use_cos(half %a) #0 { +; CHECK-LABEL: test_sincospi_f16_only_use_cos: +; CHECK: ## %bb.0: +; CHECK-NEXT: pushq %rax +; CHECK-NEXT: pextrw $0, %xmm0, %eax +; CHECK-NEXT: movzwl %ax, %edi +; CHECK-NEXT: callq ___extendhfsf2 +; CHECK-NEXT: leaq {{[0-9]+}}(%rsp), %rdi +; CHECK-NEXT: movq %rsp, %rsi +; CHECK-NEXT: callq ___sincospif +; CHECK-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero +; CHECK-NEXT: callq ___truncsfhf2 +; CHECK-NEXT: ## kill: def $ax killed $ax def $eax +; CHECK-NEXT: pinsrw $0, %eax, %xmm0 +; 
CHECK-NEXT: popq %rax +; CHECK-NEXT: retq + %result = call { half, half } @llvm.sincospi.f16(half %a) + %result.1 = extractvalue { half, half } %result, 1 + ret half %result.1 +} + +define { <2 x half>, <2 x half> } @test_sincospi_v2f16(<2 x half> %a) #0 { +; CHECK-LABEL: test_sincospi_v2f16: +; CHECK: ## %bb.0: +; CHECK-NEXT: pushq %rbx +; CHECK-NEXT: subq $64, %rsp +; CHECK-NEXT: pextrw $0, %xmm0, %ebx +; CHECK-NEXT: psrld $16, %xmm0 +; CHECK-NEXT: pextrw $0, %xmm0, %eax +; CHECK-NEXT: movzwl %ax, %edi +; CHECK-NEXT: callq ___extendhfsf2 +; CHECK-NEXT: leaq {{[0-9]+}}(%rsp), %rdi +; CHECK-NEXT: movq %rsp, %rsi +; CHECK-NEXT: callq ___sincospif +; CHECK-NEXT: movzwl %bx, %edi +; CHECK-NEXT: callq ___extendhfsf2 +; CHECK-NEXT: leaq {{[0-9]+}}(%rsp), %rdi +; CHECK-NEXT: leaq {{[0-9]+}}(%rsp), %rsi +; CHECK-NEXT: callq ___sincospif +; CHECK-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero +; CHECK-NEXT: callq ___truncsfhf2 +; CHECK-NEXT: ## kill: def $ax killed $ax def $eax +; CHECK-NEXT: pinsrw $0, %eax, %xmm0 +; CHECK-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill +; CHECK-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero +; CHECK-NEXT: callq ___truncsfhf2 +; CHECK-NEXT: ## kill: def $ax killed $ax def $eax +; CHECK-NEXT: pinsrw $0, %eax, %xmm0 +; CHECK-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill +; CHECK-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero +; CHECK-NEXT: callq ___truncsfhf2 +; CHECK-NEXT: ## kill: def $ax killed $ax def $eax +; CHECK-NEXT: pinsrw $0, %eax, %xmm0 +; CHECK-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill +; CHECK-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero +; CHECK-NEXT: callq ___truncsfhf2 +; CHECK-NEXT: ## kill: def $ax killed $ax def $eax +; CHECK-NEXT: pinsrw $0, %eax, %xmm0 +; CHECK-NEXT: punpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Folded Reload +; CHECK-NEXT: ## xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3] +; CHECK-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 ## 16-byte Reload +; CHECK-NEXT: punpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 ## 16-byte Folded Reload +; CHECK-NEXT: ## xmm1 = xmm1[0],mem[0],xmm1[1],mem[1],xmm1[2],mem[2],xmm1[3],mem[3] +; CHECK-NEXT: addq $64, %rsp +; CHECK-NEXT: popq %rbx +; CHECK-NEXT: retq + %result = call { <2 x half>, <2 x half> } @llvm.sincospi.v2f16(<2 x half> %a) + ret { <2 x half>, <2 x half> } %result +} + +define { float, float } @test_sincospi_f32(float %a) #0 { +; CHECK-LABEL: test_sincospi_f32: +; CHECK: ## %bb.0: +; CHECK-NEXT: pushq %rax +; CHECK-NEXT: leaq {{[0-9]+}}(%rsp), %rdi +; CHECK-NEXT: movq %rsp, %rsi +; CHECK-NEXT: callq ___sincospif +; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero +; CHECK-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero +; CHECK-NEXT: popq %rax +; CHECK-NEXT: retq + %result = call { float, float } @llvm.sincospi.f32(float %a) + ret { float, float } %result +} + +define { <2 x float>, <2 x float> } @test_sincospi_v2f32(<2 x float> %a) #0 { +; CHECK-LABEL: test_sincospi_v2f32: +; CHECK: ## %bb.0: +; CHECK-NEXT: subq $40, %rsp +; CHECK-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill +; CHECK-NEXT: leaq {{[0-9]+}}(%rsp), %rdi +; CHECK-NEXT: movq %rsp, %rsi +; CHECK-NEXT: callq ___sincospif +; CHECK-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload +; CHECK-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,1,1] +; CHECK-NEXT: leaq {{[0-9]+}}(%rsp), %rdi +; CHECK-NEXT: leaq {{[0-9]+}}(%rsp), %rsi +; CHECK-NEXT: callq ___sincospif +; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero +; 
CHECK-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero +; CHECK-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] +; CHECK-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero +; CHECK-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero +; CHECK-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1] +; CHECK-NEXT: addq $40, %rsp +; CHECK-NEXT: retq + %result = call { <2 x float>, <2 x float> } @llvm.sincospi.v2f32(<2 x float> %a) + ret { <2 x float>, <2 x float> } %result +} + +define { <3 x float>, <3 x float> } @test_sincospi_v3f32(<3 x float> %a) #0 { +; CHECK-LABEL: test_sincospi_v3f32: +; CHECK: ## %bb.0: +; CHECK-NEXT: subq $56, %rsp +; CHECK-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill +; CHECK-NEXT: leaq {{[0-9]+}}(%rsp), %rdi +; CHECK-NEXT: leaq {{[0-9]+}}(%rsp), %rsi +; CHECK-NEXT: callq ___sincospif +; CHECK-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload +; CHECK-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,1,1] +; CHECK-NEXT: leaq {{[0-9]+}}(%rsp), %rdi +; CHECK-NEXT: leaq {{[0-9]+}}(%rsp), %rsi +; CHECK-NEXT: callq ___sincospif +; CHECK-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload +; CHECK-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1] +; CHECK-NEXT: leaq {{[0-9]+}}(%rsp), %rdi +; CHECK-NEXT: leaq {{[0-9]+}}(%rsp), %rsi +; CHECK-NEXT: callq ___sincospif +; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero +; CHECK-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero +; CHECK-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] +; CHECK-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero +; CHECK-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0] +; CHECK-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero +; CHECK-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero +; CHECK-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1] +; CHECK-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero +; CHECK-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm2[0] +; CHECK-NEXT: addq $56, %rsp +; CHECK-NEXT: retq + %result = call { <3 x float>, <3 x float> } @llvm.sincospi.v3f32(<3 x float> %a) + ret { <3 x float>, <3 x float> } %result +} + +define { double, double } @test_sincospi_f64(double %a) #0 { +; CHECK-LABEL: test_sincospi_f64: +; CHECK: ## %bb.0: +; CHECK-NEXT: subq $24, %rsp +; CHECK-NEXT: leaq {{[0-9]+}}(%rsp), %rdi +; CHECK-NEXT: leaq {{[0-9]+}}(%rsp), %rsi +; CHECK-NEXT: callq ___sincospi +; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero +; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero +; CHECK-NEXT: addq $24, %rsp +; CHECK-NEXT: retq + %result = call { double, double } @llvm.sincospi.f64(double %a) + ret { double, double } %result +} + +define { <2 x double>, <2 x double> } @test_sincospi_v2f64(<2 x double> %a) #0 { +; CHECK-LABEL: test_sincospi_v2f64: +; CHECK: ## %bb.0: +; CHECK-NEXT: subq $56, %rsp +; CHECK-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill +; CHECK-NEXT: leaq {{[0-9]+}}(%rsp), %rdi +; CHECK-NEXT: leaq {{[0-9]+}}(%rsp), %rsi +; CHECK-NEXT: callq ___sincospi +; CHECK-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload +; CHECK-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1] +; CHECK-NEXT: leaq {{[0-9]+}}(%rsp), %rdi +; CHECK-NEXT: movq %rsp, %rsi +; CHECK-NEXT: callq ___sincospi +; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero +; CHECK-NEXT: movhps {{.*#+}} xmm0 = xmm0[0,1],mem[0,1] +; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero +; CHECK-NEXT: movhps {{.*#+}} xmm1 = xmm1[0,1],mem[0,1] +; CHECK-NEXT: addq $56, %rsp +; CHECK-NEXT: retq + %result = call { <2 x double>, <2 x double> } 
@llvm.sincospi.v2f64(<2 x double> %a) + ret { <2 x double>, <2 x double> } %result +} + +attributes #0 = { nounwind } diff --git a/llvm/test/CodeGen/X86/pr166534.ll b/llvm/test/CodeGen/X86/pr166534.ll index aef44cc3e40d0..162a0c93bfcf4 100644 --- a/llvm/test/CodeGen/X86/pr166534.ll +++ b/llvm/test/CodeGen/X86/pr166534.ll @@ -7,100 +7,64 @@ define void @pr166534(ptr %pa, ptr %pb, ptr %pc, ptr %pd) { ; SSE2-LABEL: pr166534: ; SSE2: # %bb.0: # %entry -; SSE2-NEXT: movq (%rdi), %rax -; SSE2-NEXT: movq 8(%rdi), %r8 ; SSE2-NEXT: movdqu (%rdi), %xmm0 -; SSE2-NEXT: movq (%rsi), %r9 -; SSE2-NEXT: movq 8(%rsi), %rdi ; SSE2-NEXT: movdqu (%rsi), %xmm1 ; SSE2-NEXT: pcmpeqb %xmm0, %xmm1 ; SSE2-NEXT: pmovmskb %xmm1, %esi -; SSE2-NEXT: xorl %r10d, %r10d +; SSE2-NEXT: xorl %eax, %eax ; SSE2-NEXT: cmpl $65535, %esi # imm = 0xFFFF -; SSE2-NEXT: sete %r10b -; SSE2-NEXT: orq %r10, (%rdx) +; SSE2-NEXT: sete %al +; SSE2-NEXT: orq %rax, (%rdx) ; SSE2-NEXT: cmpl $65535, %esi # imm = 0xFFFF ; SSE2-NEXT: jne .LBB0_2 ; SSE2-NEXT: # %bb.1: # %if.then -; SSE2-NEXT: xorq %r9, %rax -; SSE2-NEXT: xorq %rdi, %r8 -; SSE2-NEXT: xorl %edx, %edx -; SSE2-NEXT: orq %rax, %r8 -; SSE2-NEXT: sete %dl -; SSE2-NEXT: orq %rdx, (%rcx) +; SSE2-NEXT: orq %rax, (%rcx) ; SSE2-NEXT: .LBB0_2: # %if.end ; SSE2-NEXT: retq ; ; SSE4-LABEL: pr166534: ; SSE4: # %bb.0: # %entry -; SSE4-NEXT: movq (%rdi), %rax -; SSE4-NEXT: movq 8(%rdi), %r8 ; SSE4-NEXT: movdqu (%rdi), %xmm0 -; SSE4-NEXT: movq (%rsi), %r9 -; SSE4-NEXT: movq 8(%rsi), %rdi ; SSE4-NEXT: movdqu (%rsi), %xmm1 ; SSE4-NEXT: pxor %xmm0, %xmm1 -; SSE4-NEXT: xorl %esi, %esi +; SSE4-NEXT: xorl %eax, %eax ; SSE4-NEXT: ptest %xmm1, %xmm1 -; SSE4-NEXT: sete %sil -; SSE4-NEXT: orq %rsi, (%rdx) +; SSE4-NEXT: sete %al +; SSE4-NEXT: orq %rax, (%rdx) ; SSE4-NEXT: ptest %xmm1, %xmm1 ; SSE4-NEXT: jne .LBB0_2 ; SSE4-NEXT: # %bb.1: # %if.then -; SSE4-NEXT: xorq %r9, %rax -; SSE4-NEXT: xorq %rdi, %r8 -; SSE4-NEXT: xorl %edx, %edx -; SSE4-NEXT: orq %rax, %r8 -; SSE4-NEXT: sete %dl -; SSE4-NEXT: orq %rdx, (%rcx) +; SSE4-NEXT: orq %rax, (%rcx) ; SSE4-NEXT: .LBB0_2: # %if.end ; SSE4-NEXT: retq ; ; AVX2-LABEL: pr166534: ; AVX2: # %bb.0: # %entry -; AVX2-NEXT: movq (%rdi), %rax -; AVX2-NEXT: movq 8(%rdi), %r8 ; AVX2-NEXT: vmovdqu (%rdi), %xmm0 -; AVX2-NEXT: movq (%rsi), %rdi ; AVX2-NEXT: vpxor (%rsi), %xmm0, %xmm0 -; AVX2-NEXT: movq 8(%rsi), %rsi -; AVX2-NEXT: xorl %r9d, %r9d +; AVX2-NEXT: xorl %eax, %eax ; AVX2-NEXT: vptest %xmm0, %xmm0 -; AVX2-NEXT: sete %r9b -; AVX2-NEXT: orq %r9, (%rdx) +; AVX2-NEXT: sete %al +; AVX2-NEXT: orq %rax, (%rdx) ; AVX2-NEXT: vptest %xmm0, %xmm0 ; AVX2-NEXT: jne .LBB0_2 ; AVX2-NEXT: # %bb.1: # %if.then -; AVX2-NEXT: xorq %rdi, %rax -; AVX2-NEXT: xorq %rsi, %r8 -; AVX2-NEXT: xorl %edx, %edx -; AVX2-NEXT: orq %rax, %r8 -; AVX2-NEXT: sete %dl -; AVX2-NEXT: orq %rdx, (%rcx) +; AVX2-NEXT: orq %rax, (%rcx) ; AVX2-NEXT: .LBB0_2: # %if.end ; AVX2-NEXT: retq ; ; AVX512-LABEL: pr166534: ; AVX512: # %bb.0: # %entry -; AVX512-NEXT: movq (%rdi), %rax -; AVX512-NEXT: movq 8(%rdi), %r8 ; AVX512-NEXT: vmovdqu (%rdi), %xmm0 -; AVX512-NEXT: movq (%rsi), %r9 -; AVX512-NEXT: movq 8(%rsi), %rdi ; AVX512-NEXT: vpxor (%rsi), %xmm0, %xmm0 -; AVX512-NEXT: xorl %esi, %esi +; AVX512-NEXT: xorl %eax, %eax ; AVX512-NEXT: vptest %xmm0, %xmm0 -; AVX512-NEXT: sete %sil -; AVX512-NEXT: orq %rsi, (%rdx) +; AVX512-NEXT: sete %al +; AVX512-NEXT: orq %rax, (%rdx) ; AVX512-NEXT: vptest %xmm0, %xmm0 ; AVX512-NEXT: jne .LBB0_2 ; AVX512-NEXT: # %bb.1: # %if.then -; AVX512-NEXT: xorq %r9, %rax -; AVX512-NEXT: xorq %rdi, 
%r8 -; AVX512-NEXT: xorl %edx, %edx -; AVX512-NEXT: orq %rax, %r8 -; AVX512-NEXT: sete %dl -; AVX512-NEXT: orq %rdx, (%rcx) +; AVX512-NEXT: orq %rax, (%rcx) ; AVX512-NEXT: .LBB0_2: # %if.end ; AVX512-NEXT: retq entry: diff --git a/llvm/test/DebugInfo/AMDGPU/heterogeneous-dwarf-diop-diexpression-address-spaces.ll b/llvm/test/DebugInfo/AMDGPU/heterogeneous-dwarf-diop-diexpression-address-spaces.ll index 60a055ad66b61..21e0c572dec9b 100644 --- a/llvm/test/DebugInfo/AMDGPU/heterogeneous-dwarf-diop-diexpression-address-spaces.ll +++ b/llvm/test/DebugInfo/AMDGPU/heterogeneous-dwarf-diop-diexpression-address-spaces.ll @@ -1,5 +1,5 @@ ; RUN: llc -O0 -mcpu=gfx1030 -mtriple=amdgcn-amd-amdhsa -filetype=obj -o - < %s | llvm-dwarfdump --debug-info - | FileCheck %s - +; XFAIL: * ; CHECK-LABEL: DW_AT_name ("test_loc_single") define void @test_loc_single(ptr addrspace(3) %ptr) #0 !dbg !9 { ; Verify that the right address class attribute is attached to the variable's diff --git a/llvm/test/DebugInfo/MIR/X86/clobbered-fragments.mir b/llvm/test/DebugInfo/MIR/X86/clobbered-fragments.mir index a334e99b9cade..ea01835cae1e5 100644 --- a/llvm/test/DebugInfo/MIR/X86/clobbered-fragments.mir +++ b/llvm/test/DebugInfo/MIR/X86/clobbered-fragments.mir @@ -85,10 +85,11 @@ !15 = !DISubrange(count: 3) !16 = !DILocation(line: 8, scope: !8) !17 = !DILocation(line: 9, scope: !8) - !18 = distinct !DISubprogram(name: "test2", scope: !2, file: !2, line: 7, type: !9, scopeLine: 7, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: !1, retainedNodes: !11) + !18 = distinct !DISubprogram(name: "test2", scope: !2, file: !2, line: 7, type: !9, scopeLine: 7, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: !1, retainedNodes: !22) !19 = !DILocalVariable(name: "local", scope: !18, file: !2, line: 8, type: !13) !20 = !DILocation(line: 15, scope: !18) !21 = !DILocation(line: 16, scope: !18) + !22 = !{!19} ... 
--- diff --git a/llvm/test/DebugInfo/MIR/X86/machine-cse.mir b/llvm/test/DebugInfo/MIR/X86/machine-cse.mir index c38c0a1a79f75..63dc44fb705fe 100644 --- a/llvm/test/DebugInfo/MIR/X86/machine-cse.mir +++ b/llvm/test/DebugInfo/MIR/X86/machine-cse.mir @@ -73,13 +73,14 @@ !0 = !{i32 2, !"Debug Info Version", i32 3} !1 = distinct !DICompileUnit(language: DW_LANG_C_plus_plus, file: !2, producer: "beards", isOptimized: true, runtimeVersion: 4, emissionKind: FullDebug) !2 = !DIFile(filename: "bees.cpp", directory: "") - !3 = distinct !DISubprogram(name: "nope", scope: !1, file: !2, line: 1, spFlags: DISPFlagDefinition, unit: !1, retainedNodes: !8) - !33 = distinct !DISubprogram(name: "alsonope", scope: !1, file: !2, line: 1, spFlags: DISPFlagDefinition, unit: !1, retainedNodes: !8) + !3 = distinct !DISubprogram(name: "nope", scope: !1, file: !2, line: 1, spFlags: DISPFlagDefinition, unit: !1, retainedNodes: !9) + !33 = distinct !DISubprogram(name: "alsonope", scope: !1, file: !2, line: 1, spFlags: DISPFlagDefinition, unit: !1, retainedNodes: !9) !4 = !DILocalVariable(name: "bees", scope: !3, type: !5) !5 = !DIDerivedType(tag: DW_TAG_pointer_type, baseType: !6, size: 64) !6 = !DIBasicType(name: "int", size: 32, encoding: DW_ATE_signed) !7 = !DILocation(line: 0, scope: !3) !8 = !{!4} + !9 = !{} ; CHECK: ![[METAVAR:[0-9]+]] = !DILocalVariable(name: "bees", diff --git a/llvm/test/DebugInfo/MIR/X86/remove-redundant-dbg-vals.mir b/llvm/test/DebugInfo/MIR/X86/remove-redundant-dbg-vals.mir index 06ce18d8edaa7..28fc044e606b5 100644 --- a/llvm/test/DebugInfo/MIR/X86/remove-redundant-dbg-vals.mir +++ b/llvm/test/DebugInfo/MIR/X86/remove-redundant-dbg-vals.mir @@ -139,15 +139,15 @@ !23 = !DISubprogram(name: "bar", scope: !1, file: !1, line: 1, type: !24, flags: DIFlagPrototyped, spFlags: DISPFlagOptimized, retainedNodes: !2) !24 = !DISubroutineType(types: !25) !25 = !{null, !11} - !26 = distinct !DISubprogram(name: "foo2", scope: !1, file: !1, line: 4, type: !9, scopeLine: 4, flags: DIFlagPrototyped | DIFlagAllCallsDescribed, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: !0, retainedNodes: !12) + !26 = distinct !DISubprogram(name: "foo2", scope: !1, file: !1, line: 4, type: !9, scopeLine: 4, flags: DIFlagPrototyped | DIFlagAllCallsDescribed, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: !0, retainedNodes: !2) !27 = !DILocation(line: 0, scope: !26) - !28 = distinct !DISubprogram(name: "foo3", scope: !1, file: !1, line: 4, type: !9, scopeLine: 4, flags: DIFlagPrototyped | DIFlagAllCallsDescribed, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: !0, retainedNodes: !12) + !28 = distinct !DISubprogram(name: "foo3", scope: !1, file: !1, line: 4, type: !9, scopeLine: 4, flags: DIFlagPrototyped | DIFlagAllCallsDescribed, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: !0, retainedNodes: !2) !29 = !DILocation(line: 0, scope: !28) - !30 = distinct !DISubprogram(name: "foo4", scope: !1, file: !1, line: 4, type: !9, scopeLine: 4, flags: DIFlagPrototyped | DIFlagAllCallsDescribed, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: !0, retainedNodes: !12) + !30 = distinct !DISubprogram(name: "foo4", scope: !1, file: !1, line: 4, type: !9, scopeLine: 4, flags: DIFlagPrototyped | DIFlagAllCallsDescribed, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: !0, retainedNodes: !2) !31 = !DILocation(line: 0, scope: !30) - !32 = distinct !DISubprogram(name: "foo5", scope: !1, file: !1, line: 4, type: !9, scopeLine: 4, flags: DIFlagPrototyped | DIFlagAllCallsDescribed, spFlags: 
DISPFlagDefinition | DISPFlagOptimized, unit: !0, retainedNodes: !12) + !32 = distinct !DISubprogram(name: "foo5", scope: !1, file: !1, line: 4, type: !9, scopeLine: 4, flags: DIFlagPrototyped | DIFlagAllCallsDescribed, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: !0, retainedNodes: !2) !33 = !DILocation(line: 0, scope: !32) - !34 = distinct !DISubprogram(name: "foo6", scope: !1, file: !1, line: 4, type: !9, scopeLine: 4, flags: DIFlagPrototyped | DIFlagAllCallsDescribed, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: !0, retainedNodes: !12) + !34 = distinct !DISubprogram(name: "foo6", scope: !1, file: !1, line: 4, type: !9, scopeLine: 4, flags: DIFlagPrototyped | DIFlagAllCallsDescribed, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: !0, retainedNodes: !2) !35 = !DILocation(line: 0, scope: !34) ... diff --git a/llvm/test/DebugInfo/X86/instr-ref-selectiondag.ll b/llvm/test/DebugInfo/X86/instr-ref-selectiondag.ll index dbbef2b39587d..594607c6e95d8 100644 --- a/llvm/test/DebugInfo/X86/instr-ref-selectiondag.ll +++ b/llvm/test/DebugInfo/X86/instr-ref-selectiondag.ll @@ -281,15 +281,19 @@ lala: !11 = !{!13} !13 = !DILocalVariable(name: "baz", scope: !7, file: !1, line: 6, type: !10) !14 = !DILocation(line: 1, scope: !7) -!20 = distinct !DISubprogram(name: "bar", scope: !1, file: !1, line: 5, type: !8, scopeLine: 5, flags: DIFlagPrototyped, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: !0, retainedNodes: !11) +!20 = distinct !DISubprogram(name: "bar", scope: !1, file: !1, line: 5, type: !8, scopeLine: 5, flags: DIFlagPrototyped, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: !0, retainedNodes: !23) !21 = !DILocalVariable(name: "xyzzy", scope: !20, file: !1, line: 6, type: !10) !22 = !DILocation(line: 1, scope: !20) -!30 = distinct !DISubprogram(name: "bar", scope: !1, file: !1, line: 5, type: !8, scopeLine: 5, flags: DIFlagPrototyped, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: !0, retainedNodes: !11) +!23 = !{!21} +!30 = distinct !DISubprogram(name: "bar", scope: !1, file: !1, line: 5, type: !8, scopeLine: 5, flags: DIFlagPrototyped, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: !0, retainedNodes: !33) !31 = !DILocalVariable(name: "xyzzy", scope: !30, file: !1, line: 6, type: !10) !32 = !DILocation(line: 1, scope: !30) -!40 = distinct !DISubprogram(name: "qux", scope: !1, file: !1, line: 5, type: !8, scopeLine: 5, flags: DIFlagPrototyped, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: !0, retainedNodes: !11) +!33 = !{!31} +!40 = distinct !DISubprogram(name: "qux", scope: !1, file: !1, line: 5, type: !8, scopeLine: 5, flags: DIFlagPrototyped, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: !0, retainedNodes: !46) !41 = !DILocalVariable(name: "socks", scope: !40, file: !1, line: 6, type: !10) !42 = !DILocation(line: 1, scope: !40) -!43 = distinct !DISubprogram(name: "inlined", scope: !1, file: !1, line: 5, type: !8, scopeLine: 5, flags: DIFlagPrototyped, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: !0, retainedNodes: !11) +!43 = distinct !DISubprogram(name: "inlined", scope: !1, file: !1, line: 5, type: !8, scopeLine: 5, flags: DIFlagPrototyped, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: !0, retainedNodes: !47) !44 = !DILocation(line: 0, scope: !43, inlinedAt: !42) !45 = !DILocalVariable(name: "knees", scope: !43, file: !1, line: 6, type: !10) +!46 = !{!41} +!47 = !{!45} diff --git a/llvm/test/DebugInfo/X86/live-debug-values-constprop.mir b/llvm/test/DebugInfo/X86/live-debug-values-constprop.mir 
index 8a0537658c9c0..2900f0bdcf864 100644 --- a/llvm/test/DebugInfo/X86/live-debug-values-constprop.mir +++ b/llvm/test/DebugInfo/X86/live-debug-values-constprop.mir @@ -82,15 +82,18 @@ !14 = !DISubroutineType(types: !15) !15 = !{!16} !16 = !DIBasicType(name: "int", size: 32, align: 32, encoding: DW_ATE_signed) - !40 = distinct !DISubprogram(name: "bar", scope: !2, file: !2, line: 1, spFlags: DISPFlagDefinition, unit: !1, retainedNodes: !13, type: !14, isDefinition: true) + !40 = distinct !DISubprogram(name: "bar", scope: !2, file: !2, line: 1, spFlags: DISPFlagDefinition, unit: !1, retainedNodes: !43, type: !14, isDefinition: true) !41 = !DILocalVariable(name: "towel", scope: !40, file: !2, line: 1, type: !16) !42 = !DILocation(line: 40, scope: !40) - !80 = distinct !DISubprogram(name: "baz", scope: !2, file: !2, line: 1, spFlags: DISPFlagDefinition, unit: !1, retainedNodes: !13, type: !14, isDefinition: true) + !43 = !{!41} + !80 = distinct !DISubprogram(name: "baz", scope: !2, file: !2, line: 1, spFlags: DISPFlagDefinition, unit: !1, retainedNodes: !83, type: !14, isDefinition: true) !81 = !DILocalVariable(name: "socks", scope: !80, file: !2, line: 1, type: !16) !82 = !DILocation(line: 40, scope: !80) - !120 = distinct !DISubprogram(name: "qux", scope: !2, file: !2, line: 1, spFlags: DISPFlagDefinition, unit: !1, retainedNodes: !13, type: !14, isDefinition: true) + !83 = !{!81} + !120 = distinct !DISubprogram(name: "qux", scope: !2, file: !2, line: 1, spFlags: DISPFlagDefinition, unit: !1, retainedNodes: !123, type: !14, isDefinition: true) !121 = !DILocalVariable(name: "shoes", scope: !120, file: !2, line: 1, type: !16) !122 = !DILocation(line: 40, scope: !120) + !123 = !{!121} ... --- diff --git a/llvm/test/DebugInfo/X86/live-debug-values-remove-range.ll b/llvm/test/DebugInfo/X86/live-debug-values-remove-range.ll index e656c6237c068..145b5045687cf 100644 --- a/llvm/test/DebugInfo/X86/live-debug-values-remove-range.ll +++ b/llvm/test/DebugInfo/X86/live-debug-values-remove-range.ll @@ -108,6 +108,6 @@ exit: !106 = !DILocation(line: 1, scope: !104) !113 = !{!103} !203 = !DILocalVariable(name: "teacake", scope: !204, file: !2, line: 1, type: !16) -!204 = distinct !DISubprogram(name: "toad", scope: !2, file: !2, line: 1, spFlags: DISPFlagDefinition, unit: !1, retainedNodes: !113, type: !14, isDefinition: true) +!204 = distinct !DISubprogram(name: "toad", scope: !2, file: !2, line: 1, spFlags: DISPFlagDefinition, unit: !1, retainedNodes: !213, type: !14, isDefinition: true) !206 = !DILocation(line: 1, scope: !204) !213 = !{!203} diff --git a/llvm/test/DebugInfo/X86/live-debug-vars-intervals.mir b/llvm/test/DebugInfo/X86/live-debug-vars-intervals.mir index 3beaf8996e4f0..ab57a9612702f 100644 --- a/llvm/test/DebugInfo/X86/live-debug-vars-intervals.mir +++ b/llvm/test/DebugInfo/X86/live-debug-vars-intervals.mir @@ -91,7 +91,7 @@ !10 = !{!11} !11 = !DILocalVariable(name: "x", arg: 1, scope: !6, file: !1, line: 3, type: !9) !12 = !DILocation(line: 3, column: 12, scope: !6) - !13 = distinct !DISubprogram(name: "f2", scope: !1, file: !1, line: 20, type: !7, scopeLine: 20, flags: DIFlagPrototyped, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: !0, retainedNodes: !10) + !13 = distinct !DISubprogram(name: "f2", scope: !1, file: !1, line: 20, type: !7, scopeLine: 20, flags: DIFlagPrototyped, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: !0, retainedNodes: !{!14}) !14 = !DILocalVariable(name: "x", arg: 1, scope: !13, file: !1, line: 21, type: !9) !15 = !DILocation(line: 23, column: 12, 
diff --git a/llvm/test/Linker/thinlto_funcimport_debug.ll b/llvm/test/Linker/thinlto_funcimport_debug.ll
index 294b3a773ef51..4454a56c40ef7 100644
--- a/llvm/test/Linker/thinlto_funcimport_debug.ll
+++ b/llvm/test/Linker/thinlto_funcimport_debug.ll
@@ -80,8 +80,8 @@ attributes #1 = { nounwind readnone }
 !26 = !DILocation(line: 9, column: 3, scope: !4)
 !27 = distinct !DISubprogram(name: "func3", scope: !1, file: !1, line: 8, type: !5, isLocal: false, isDefinition: true, scopeLine: 8, flags: DIFlagPrototyped, isOptimized: true, unit: !0, retainedNodes: !28)
 !28 = !{!29}
-!29 = !DILocalVariable(name: "n", arg: 1, scope: !30, file: !1, line: 8, type: !7)
+!29 = !DILocalVariable(name: "n", arg: 1, scope: !27, file: !1, line: 8, type: !33)
 !30 = distinct !DISubprogram(name: "func4", scope: !1, file: !1, line: 8, type: !5, isLocal: false, isDefinition: true, scopeLine: 8, flags: DIFlagPrototyped, isOptimized: true, unit: !0, retainedNodes: !31)
 !31 = !{!32}
 !32 = !DILocalVariable(name: "n", arg: 1, scope: !30, file: !1, line: 8, type: !7)
-
+!33 = !DIDerivedType(tag: DW_TAG_typedef, name: "size_t", scope: !30, file: !1, line: 13, baseType: !7)
diff --git a/llvm/test/Transforms/CodeExtractor/PartialInlineDebug.ll b/llvm/test/Transforms/CodeExtractor/PartialInlineDebug.ll
index eb2fb4f4774d8..ab01bbf20de71 100644
--- a/llvm/test/Transforms/CodeExtractor/PartialInlineDebug.ll
+++ b/llvm/test/Transforms/CodeExtractor/PartialInlineDebug.ll
@@ -96,11 +96,11 @@ entry:
 !13 = !DILocalVariable(name: "v", arg: 1, scope: !8, file: !1, line: 3, type: !11)
 !14 = !DILocation(line: 5, column: 10, scope: !8)
 !15 = distinct !DILexicalBlock(scope: !16, file: !1, line: 9, column: 7)
-!16 = distinct !DISubprogram(name: "callee", scope: !1, file: !1, line: 8, type: !9, isLocal: false, isDefinition: true, scopeLine: 8, flags: DIFlagPrototyped, isOptimized: true, unit: !0, retainedNodes: !12)
+!16 = distinct !DISubprogram(name: "callee", scope: !1, file: !1, line: 8, type: !9, isLocal: false, isDefinition: true, scopeLine: 8, flags: DIFlagPrototyped, isOptimized: true, unit: !0, retainedNodes: !2)
 !17 = !DILocation(line: 10, column: 7, scope: !15)
-!18 = distinct !DISubprogram(name: "callee2", scope: !1, file: !1, line: 8, type: !9, isLocal: false, isDefinition: true, scopeLine: 8, flags: DIFlagPrototyped, isOptimized: true, unit: !0, retainedNodes: !12)
+!18 = distinct !DISubprogram(name: "callee2", scope: !1, file: !1, line: 8, type: !9, isLocal: false, isDefinition: true, scopeLine: 8, flags: DIFlagPrototyped, isOptimized: true, unit: !0, retainedNodes: !2)
 !19 = distinct !DILexicalBlock(scope: !18, file: !1, line: 100, column: 1)
 !20 = !DILocation(line: 110, column: 17, scope: !19)
-!21 = distinct !DISubprogram(name: "caller2", scope: !1, file: !1, line: 8, type: !9, isLocal: false, isDefinition: true, scopeLine: 8, flags: DIFlagPrototyped, isOptimized: true, unit: !0, retainedNodes: !12)
+!21 = distinct !DISubprogram(name: "caller2", scope: !1, file: !1, line: 8, type: !9, isLocal: false, isDefinition: true, scopeLine: 8, flags: DIFlagPrototyped, isOptimized: true, unit: !0, retainedNodes: !2)
 !22 = !DILocation(line: 110, column: 17, scope: !21)
 !23 = !DILocation(line: 15, column: 7, scope: !15)
diff --git a/llvm/test/Transforms/HotColdSplit/split-out-dbg-label.ll b/llvm/test/Transforms/HotColdSplit/split-out-dbg-label.ll
index da6c19d604c7c..76406ddea6b9f 100644
--- a/llvm/test/Transforms/HotColdSplit/split-out-dbg-label.ll
+++ b/llvm/test/Transforms/HotColdSplit/split-out-dbg-label.ll
@@ -66,7 +66,7 @@ define void @inline_me() !dbg !13 {
 !10 = !DIBasicType(name: "ty32", size: 32, encoding: DW_ATE_unsigned)
 !11 = !DILocation(line: 1, column: 1, scope: !6)
 !12 = !DILabel(scope: !6, name: "bye", file: !1, line: 28)
-!13 = distinct !DISubprogram(name: "inline_me", linkageName: "inline_me", scope: null, file: !1, line: 1, type: !7, isLocal: false, isDefinition: true, scopeLine: 1, isOptimized: true, unit: !0, retainedNodes: !8)
+!13 = distinct !DISubprogram(name: "inline_me", linkageName: "inline_me", scope: null, file: !1, line: 1, type: !7, isLocal: false, isDefinition: true, scopeLine: 1, isOptimized: true, unit: !0, retainedNodes: !2)
 !14 = !DILabel(scope: !13, name: "label_in_@inline_me", file: !1, line: 29)
 !15 = !DILocation(line: 2, column: 2, scope: !13, inlinedAt: !11)
 !16 = !DILabel(scope: !17, name: "scoped_label_in_foo", file: !1, line: 30)
diff --git a/llvm/test/Transforms/HotColdSplit/transfer-debug-info.ll b/llvm/test/Transforms/HotColdSplit/transfer-debug-info.ll
index 3f69f0c200dad..f9dd9eaf01422 100644
--- a/llvm/test/Transforms/HotColdSplit/transfer-debug-info.ll
+++ b/llvm/test/Transforms/HotColdSplit/transfer-debug-info.ll
@@ -106,7 +106,7 @@ define void @inline_me() !dbg !12{
 !9 = !DILocalVariable(name: "1", scope: !6, file: !1, line: 1, type: !10)
 !10 = !DIBasicType(name: "ty32", size: 32, encoding: DW_ATE_unsigned)
 !11 = !DILocation(line: 1, column: 1, scope: !6)
-!12 = distinct !DISubprogram(name: "inline_me", linkageName: "inline_me", scope: null, file: !1, line: 1, type: !7, isLocal: false, isDefinition: true, scopeLine: 1, isOptimized: true, unit: !0, retainedNodes: !8)
+!12 = distinct !DISubprogram(name: "inline_me", linkageName: "inline_me", scope: null, file: !1, line: 1, type: !7, isLocal: false, isDefinition: true, scopeLine: 1, isOptimized: true, unit: !0, retainedNodes: !2)
 !13 = !DILocation(line: 2, column: 2, scope: !12, inlinedAt: !14)
 !14 = !DILocation(line: 3, column: 3, scope: !15)
 !15 = distinct !DILexicalBlock(scope: !16, file: !1, line: 4, column: 4)
diff --git a/llvm/test/Transforms/InstCombine/debuginfo-dce.ll b/llvm/test/Transforms/InstCombine/debuginfo-dce.ll
index c1d7c30e936f2..ec90779d0acce 100644
--- a/llvm/test/Transforms/InstCombine/debuginfo-dce.ll
+++ b/llvm/test/Transforms/InstCombine/debuginfo-dce.ll
@@ -125,15 +125,15 @@ attributes #1 = { nounwind readnone }
 !19 = !DILocation(line: 6, column: 17, scope: !14)
 !20 = !DIExpression(DW_OP_plus_uconst, 0)
 !21 = !DILocation(line: 11, column: 1, scope: !14)
-!22 = distinct !DISubprogram(name: "scan", scope: !1, file: !1, line: 4, type: !15, isLocal: false, isDefinition: true, scopeLine: 5, flags: DIFlagPrototyped, isOptimized: true, unit: !0, retainedNodes: !17)
+!22 = distinct !DISubprogram(name: "scan", scope: !1, file: !1, line: 4, type: !15, isLocal: false, isDefinition: true, scopeLine: 5, flags: DIFlagPrototyped, isOptimized: true, unit: !0, retainedNodes: !2)
 !23 = !DILocation(line: 6, column: 17, scope: !22)
 !24 = !DILocalVariable(name: "entry", scope: !22, file: !1, line: 6, type: !4)
-!25 = distinct !DISubprogram(name: "scan", scope: !1, file: !1, line: 4, type: !15, isLocal: false, isDefinition: true, scopeLine: 5, flags: DIFlagPrototyped, isOptimized: true, unit: !0, retainedNodes: !17)
+!25 = distinct !DISubprogram(name: "scan", scope: !1, file: !1, line: 4, type: !15, isLocal: false, isDefinition: true, scopeLine: 5, flags: DIFlagPrototyped, isOptimized: true, unit: !0, retainedNodes: !2)
 !26 = !DILocation(line: 6, column: 17, scope: !25)
 !27 = !DILocalVariable(name: "entry", scope: !25, file: !1, line: 6, type: !4)
-!28 = distinct !DISubprogram(name: "scan", scope: !1, file: !1, line: 4, type: !15, isLocal: false, isDefinition: true, scopeLine: 5, flags: DIFlagPrototyped, isOptimized: true, unit: !0, retainedNodes: !17)
+!28 = distinct !DISubprogram(name: "scan", scope: !1, file: !1, line: 4, type: !15, isLocal: false, isDefinition: true, scopeLine: 5, flags: DIFlagPrototyped, isOptimized: true, unit: !0, retainedNodes: !2)
 !29 = !DILocation(line: 6, column: 17, scope: !28)
 !30 = !DILocalVariable(name: "entry", scope: !28, file: !1, line: 6, type: !4)
-!31 = distinct !DISubprogram(name: "scan", scope: !1, file: !1, line: 4, type: !15, isLocal: false, isDefinition: true, scopeLine: 5, flags: DIFlagPrototyped, isOptimized: true, unit: !0, retainedNodes: !17)
+!31 = distinct !DISubprogram(name: "scan", scope: !1, file: !1, line: 4, type: !15, isLocal: false, isDefinition: true, scopeLine: 5, flags: DIFlagPrototyped, isOptimized: true, unit: !0, retainedNodes: !2)
 !32 = !DILocation(line: 6, column: 17, scope: !31)
 !33 = !DILocalVariable(name: "entry", scope: !31, file: !1, line: 6, type: !4)
diff --git a/llvm/test/Transforms/LCSSA/rewrite-existing-dbg-values.ll b/llvm/test/Transforms/LCSSA/rewrite-existing-dbg-values.ll
index 437e56665d53b..fa8357505e7e9 100644
--- a/llvm/test/Transforms/LCSSA/rewrite-existing-dbg-values.ll
+++ b/llvm/test/Transforms/LCSSA/rewrite-existing-dbg-values.ll
@@ -131,7 +131,8 @@ declare void @llvm.dbg.value(metadata, metadata, metadata)
 !10 = !DILexicalBlockFile(scope: !6, file: !1, discriminator: 0)
 !11 = !DIBasicType(name: "int", size: 32, encoding: DW_ATE_signed)
 !12 = !DILocation(line: 0, scope: !10)
-!13 = distinct !DISubprogram(name: "multi_exit", scope: !1, file: !1, line: 10, type: !7, scopeLine: 10, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: !0, retainedNodes: !8)
+!13 = distinct !DISubprogram(name: "multi_exit", scope: !1, file: !1, line: 10, type: !7, scopeLine: 10, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: !0, retainedNodes: !17)
 !14 = !DILocation(line: 0, scope: !15)
 !15 = !DILexicalBlockFile(scope: !13, file: !1, discriminator: 0)
 !16 = !DILocalVariable(name: "sum2", scope: !15, file: !1, line: 11, type: !11)
+!17 = !{!16}
diff --git a/llvm/test/Transforms/LoopStrengthReduce/AArch64/vscale-fixups.ll b/llvm/test/Transforms/LoopStrengthReduce/AArch64/vscale-fixups.ll
index 9003072f5fcdf..dd347a7a6519d 100644
--- a/llvm/test/Transforms/LoopStrengthReduce/AArch64/vscale-fixups.ll
+++ b/llvm/test/Transforms/LoopStrengthReduce/AArch64/vscale-fixups.ll
@@ -19,9 +19,8 @@ define void @mulvl123_addressing(ptr %src, ptr %dst, i64 %count) #0 {
 ; COMMON-NEXT: ldr z3, [x0, #3, mul vl]
 ; COMMON-NEXT: addvl x0, x0, #5
 ; COMMON-NEXT: umax z0.b, p0/m, z0.b, z1.b
-; COMMON-NEXT: movprfx z1, z2
-; COMMON-NEXT: umax z1.b, p0/m, z1.b, z3.b
-; COMMON-NEXT: umax z0.b, p0/m, z0.b, z1.b
+; COMMON-NEXT: umax z2.b, p0/m, z2.b, z3.b
+; COMMON-NEXT: umax z0.b, p0/m, z0.b, z2.b
 ; COMMON-NEXT: st1b { z0.b }, p0, [x1, x8]
 ; COMMON-NEXT: incb x8
 ; COMMON-NEXT: cmp x8, x2
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/conditional-branches-cost.ll b/llvm/test/Transforms/LoopVectorize/AArch64/conditional-branches-cost.ll
index f16351720b20f..2f7e3568d5654 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/conditional-branches-cost.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/conditional-branches-cost.ll
@@ -429,48 +429,36 @@ define i32 @header_mask_and_invariant_compare(ptr %A, ptr %B, ptr %C, ptr %D, pt
 ; DEFAULT-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP0]], [[N_MOD_VF]]
 ; DEFAULT-NEXT: br label %[[VECTOR_BODY:.*]]
 ; DEFAULT: [[VECTOR_BODY]]:
-; DEFAULT-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[PRED_STORE_CONTINUE37:.*]] ]
-; DEFAULT-NEXT: [[TMP9:%.*]] = load i32, ptr [[A]], align 4, !alias.scope [[META8:![0-9]+]]
-; DEFAULT-NEXT: [[BROADCAST_SPLATINSERT28:%.*]] = insertelement <4 x i32> poison, i32 [[TMP9]], i64 0
-; DEFAULT-NEXT: [[BROADCAST_SPLAT29:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT28]], <4 x i32> poison, <4 x i32> zeroinitializer
-; DEFAULT-NEXT: [[TMP19:%.*]] = load i32, ptr [[B]], align 4, !alias.scope [[META11:![0-9]+]]
-; DEFAULT-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[TMP19]], i64 0
-; DEFAULT-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer
-; DEFAULT-NEXT: [[TMP6:%.*]] = or <4 x i32> [[BROADCAST_SPLAT]], [[BROADCAST_SPLAT29]]
-; DEFAULT-NEXT: [[TMP7:%.*]] = load i32, ptr [[C]], align 4, !alias.scope [[META13:![0-9]+]]
-; DEFAULT-NEXT: [[BROADCAST_SPLATINSERT30:%.*]] = insertelement <4 x i32> poison, i32 [[TMP7]], i64 0
-; DEFAULT-NEXT: [[BROADCAST_SPLAT31:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT30]], <4 x i32> poison, <4 x i32> zeroinitializer
-; DEFAULT-NEXT: [[TMP8:%.*]] = icmp ugt <4 x i32> [[BROADCAST_SPLAT31]], [[TMP6]]
+; DEFAULT-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[PRED_STORE_CONTINUE33:.*]] ]
+; DEFAULT-NEXT: [[TMP3:%.*]] = load i32, ptr [[A]], align 4, !alias.scope [[META8:![0-9]+]]
+; DEFAULT-NEXT: [[TMP4:%.*]] = load i32, ptr [[B]], align 4, !alias.scope [[META11:![0-9]+]]
+; DEFAULT-NEXT: [[TMP5:%.*]] = or i32 [[TMP4]], [[TMP3]]
+; DEFAULT-NEXT: [[TMP6:%.*]] = load i32, ptr [[C]], align 4, !alias.scope [[META13:![0-9]+]]
+; DEFAULT-NEXT: [[TMP7:%.*]] = icmp ugt i32 [[TMP6]], [[TMP5]]
+; DEFAULT-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i1> poison, i1 [[TMP7]], i64 0
+; DEFAULT-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i1> [[BROADCAST_SPLATINSERT]], <4 x i1> poison, <4 x i32> zeroinitializer
 ; DEFAULT-NEXT: [[TMP16:%.*]] = getelementptr i32, ptr [[D]], i64 [[INDEX]]
-; DEFAULT-NEXT: [[TMP20:%.*]] = extractelement <4 x i1> [[TMP8]], i32 0
-; DEFAULT-NEXT: br i1 [[TMP20]], label %[[PRED_STORE_IF:.*]], label %[[PRED_STORE_CONTINUE:.*]]
+; DEFAULT-NEXT: br i1 [[TMP7]], label %[[PRED_STORE_IF:.*]], label %[[PRED_STORE_CONTINUE:.*]]
 ; DEFAULT: [[PRED_STORE_IF]]:
-; DEFAULT-NEXT: [[TMP11:%.*]] = extractelement <4 x i32> [[TMP6]], i32 0
-; DEFAULT-NEXT: store i32 [[TMP11]], ptr [[E]], align 4, !alias.scope [[META15:![0-9]+]], !noalias [[META17:![0-9]+]]
+; DEFAULT-NEXT: store i32 [[TMP5]], ptr [[E]], align 4, !alias.scope [[META15:![0-9]+]], !noalias [[META17:![0-9]+]]
 ; DEFAULT-NEXT: br label %[[PRED_STORE_CONTINUE]]
 ; DEFAULT: [[PRED_STORE_CONTINUE]]:
-; DEFAULT-NEXT: [[TMP12:%.*]] = extractelement <4 x i1> [[TMP8]], i32 1
-; DEFAULT-NEXT: br i1 [[TMP12]], label %[[PRED_STORE_IF32:.*]], label %[[PRED_STORE_CONTINUE33:.*]]
+; DEFAULT-NEXT: br i1 [[TMP7]], label %[[PRED_STORE_IF28:.*]], label %[[PRED_STORE_CONTINUE29:.*]]
+; DEFAULT: [[PRED_STORE_IF28]]:
+; DEFAULT-NEXT: store i32 [[TMP5]], ptr [[E]], align 4, !alias.scope [[META15]], !noalias [[META17]]
+; DEFAULT-NEXT: br label %[[PRED_STORE_CONTINUE29]]
+; DEFAULT: [[PRED_STORE_CONTINUE29]]:
+; DEFAULT-NEXT: br i1 [[TMP7]], label %[[PRED_STORE_IF30:.*]], label %[[PRED_STORE_CONTINUE31:.*]]
+; DEFAULT: [[PRED_STORE_IF30]]:
+; DEFAULT-NEXT: store i32 [[TMP5]], ptr [[E]], align 4, !alias.scope [[META15]], !noalias [[META17]]
+; DEFAULT-NEXT: br label %[[PRED_STORE_CONTINUE31]]
+; DEFAULT: [[PRED_STORE_CONTINUE31]]:
+; DEFAULT-NEXT: br i1 [[TMP7]], label %[[PRED_STORE_IF32:.*]], label %[[PRED_STORE_CONTINUE33]]
 ; DEFAULT: [[PRED_STORE_IF32]]:
-; DEFAULT-NEXT: [[TMP13:%.*]] = extractelement <4 x i32> [[TMP6]], i32 0
-; DEFAULT-NEXT: store i32 [[TMP13]], ptr [[E]], align 4, !alias.scope [[META15]], !noalias [[META17]]
+; DEFAULT-NEXT: store i32 [[TMP5]], ptr [[E]], align 4, !alias.scope [[META15]], !noalias [[META17]]
 ; DEFAULT-NEXT: br label %[[PRED_STORE_CONTINUE33]]
 ; DEFAULT: [[PRED_STORE_CONTINUE33]]:
-; DEFAULT-NEXT: [[TMP14:%.*]] = extractelement <4 x i1> [[TMP8]], i32 2
-; DEFAULT-NEXT: br i1 [[TMP14]], label %[[PRED_STORE_IF34:.*]], label %[[PRED_STORE_CONTINUE35:.*]]
-; DEFAULT: [[PRED_STORE_IF34]]:
-; DEFAULT-NEXT: [[TMP15:%.*]] = extractelement <4 x i32> [[TMP6]], i32 0
-; DEFAULT-NEXT: store i32 [[TMP15]], ptr [[E]], align 4, !alias.scope [[META15]], !noalias [[META17]]
-; DEFAULT-NEXT: br label %[[PRED_STORE_CONTINUE35]]
-; DEFAULT: [[PRED_STORE_CONTINUE35]]:
-; DEFAULT-NEXT: [[TMP21:%.*]] = extractelement <4 x i1> [[TMP8]], i32 3
-; DEFAULT-NEXT: br i1 [[TMP21]], label %[[PRED_STORE_IF36:.*]], label %[[PRED_STORE_CONTINUE37]]
-; DEFAULT: [[PRED_STORE_IF36]]:
-; DEFAULT-NEXT: [[TMP22:%.*]] = extractelement <4 x i32> [[TMP6]], i32 0
-; DEFAULT-NEXT: store i32 [[TMP22]], ptr [[E]], align 4, !alias.scope [[META15]], !noalias [[META17]]
-; DEFAULT-NEXT: br label %[[PRED_STORE_CONTINUE37]]
-; DEFAULT: [[PRED_STORE_CONTINUE37]]:
-; DEFAULT-NEXT: call void @llvm.masked.store.v4i32.p0(<4 x i32> zeroinitializer, ptr align 4 [[TMP16]], <4 x i1> [[TMP8]]), !alias.scope [[META19:![0-9]+]], !noalias [[META20:![0-9]+]]
+; DEFAULT-NEXT: call void @llvm.masked.store.v4i32.p0(<4 x i32> zeroinitializer, ptr align 4 [[TMP16]], <4 x i1> [[BROADCAST_SPLAT]]), !alias.scope [[META19:![0-9]+]], !noalias [[META20:![0-9]+]]
 ; DEFAULT-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
 ; DEFAULT-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
 ; DEFAULT-NEXT: br i1 [[TMP18]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP21:![0-9]+]]
@@ -613,63 +601,17 @@ exit:
 define void @low_trip_count_fold_tail_scalarized_store(ptr %dst) {
 ; COMMON-LABEL: define void @low_trip_count_fold_tail_scalarized_store(
 ; COMMON-SAME: ptr [[DST:%.*]]) {
-; COMMON-NEXT: [[ENTRY:.*:]]
-; COMMON-NEXT: br label %[[VECTOR_PH:.*]]
-; COMMON: [[VECTOR_PH]]:
-; COMMON-NEXT: br label %[[VECTOR_BODY:.*]]
-; COMMON: [[VECTOR_BODY]]:
-; COMMON-NEXT: br i1 true, label %[[PRED_STORE_IF:.*]], label %[[PRED_STORE_CONTINUE:.*]]
-; COMMON: [[PRED_STORE_IF]]:
-; COMMON-NEXT: [[TMP0:%.*]] = getelementptr i8, ptr [[DST]], i64 0
-; COMMON-NEXT: store i8 0, ptr [[TMP0]], align 1
-; COMMON-NEXT: br label %[[PRED_STORE_CONTINUE]]
-; COMMON: [[PRED_STORE_CONTINUE]]:
-; COMMON-NEXT: br i1 true, label %[[PRED_STORE_IF1:.*]], label %[[PRED_STORE_CONTINUE2:.*]]
-; COMMON: [[PRED_STORE_IF1]]:
-; COMMON-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr [[DST]], i64 1
-; COMMON-NEXT: store i8 1, ptr [[TMP1]], align 1
-; COMMON-NEXT: br label %[[PRED_STORE_CONTINUE2]]
-; COMMON: [[PRED_STORE_CONTINUE2]]:
-; COMMON-NEXT: br i1 true, label %[[PRED_STORE_IF3:.*]], label %[[PRED_STORE_CONTINUE4:.*]]
-; COMMON: [[PRED_STORE_IF3]]:
-; COMMON-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[DST]], i64 2
-; COMMON-NEXT: store i8 2, ptr [[TMP2]], align 1
-; COMMON-NEXT: br label %[[PRED_STORE_CONTINUE4]]
-; COMMON: [[PRED_STORE_CONTINUE4]]:
-; COMMON-NEXT: br i1 true, label %[[PRED_STORE_IF5:.*]], label %[[PRED_STORE_CONTINUE6:.*]]
-; COMMON: [[PRED_STORE_IF5]]:
-; COMMON-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[DST]], i64 3
-; COMMON-NEXT: store i8 3, ptr [[TMP3]], align 1
-; COMMON-NEXT: br label %[[PRED_STORE_CONTINUE6]]
-; COMMON: [[PRED_STORE_CONTINUE6]]:
-; COMMON-NEXT: br i1 true, label %[[PRED_STORE_IF7:.*]], label %[[PRED_STORE_CONTINUE8:.*]]
-; COMMON: [[PRED_STORE_IF7]]:
-; COMMON-NEXT: [[TMP4:%.*]] = getelementptr i8, ptr [[DST]], i64 4
-; COMMON-NEXT: store i8 4, ptr [[TMP4]], align 1
-; COMMON-NEXT: br label %[[PRED_STORE_CONTINUE8]]
-; COMMON: [[PRED_STORE_CONTINUE8]]:
-; COMMON-NEXT: br i1 true, label %[[PRED_STORE_IF9:.*]], label %[[PRED_STORE_CONTINUE10:.*]]
-; COMMON: [[PRED_STORE_IF9]]:
-; COMMON-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[DST]], i64 5
-; COMMON-NEXT: store i8 5, ptr [[TMP5]], align 1
-; COMMON-NEXT: br label %[[PRED_STORE_CONTINUE10]]
-; COMMON: [[PRED_STORE_CONTINUE10]]:
-; COMMON-NEXT: br i1 true, label %[[PRED_STORE_IF11:.*]], label %[[PRED_STORE_CONTINUE12:.*]]
-; COMMON: [[PRED_STORE_IF11]]:
-; COMMON-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[DST]], i64 6
-; COMMON-NEXT: store i8 6, ptr [[TMP6]], align 1
-; COMMON-NEXT: br label %[[PRED_STORE_CONTINUE12]]
-; COMMON: [[PRED_STORE_CONTINUE12]]:
-; COMMON-NEXT: br i1 false, label %[[PRED_STORE_IF13:.*]], label %[[EXIT:.*]]
-; COMMON: [[PRED_STORE_IF13]]:
-; COMMON-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[DST]], i64 7
-; COMMON-NEXT: store i8 7, ptr [[TMP7]], align 1
-; COMMON-NEXT: br label %[[EXIT]]
+; COMMON-NEXT: [[ENTRY:.*]]:
+; COMMON-NEXT: br label %[[LOOP:.*]]
+; COMMON: [[LOOP]]:
+; COMMON-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
+; COMMON-NEXT: [[IV_TRUNC:%.*]] = trunc i64 [[IV]] to i8
+; COMMON-NEXT: [[GEP:%.*]] = getelementptr i8, ptr [[DST]], i64 [[IV]]
+; COMMON-NEXT: store i8 [[IV_TRUNC]], ptr [[GEP]], align 1
+; COMMON-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
+; COMMON-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], 7
+; COMMON-NEXT: br i1 [[EC]], label %[[EXIT:.*]], label %[[LOOP]]
 ; COMMON: [[EXIT]]:
-; COMMON-NEXT: br label %[[SCALAR_PH:.*]]
-; COMMON: [[SCALAR_PH]]:
-; COMMON-NEXT: br label %[[EXIT1:.*]]
-; COMMON: [[EXIT1]]:
 ; COMMON-NEXT: ret void
 ;
 entry:
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/extractvalue-no-scalarization-required.ll b/llvm/test/Transforms/LoopVectorize/AArch64/extractvalue-no-scalarization-required.ll
index 5970608794b55..bea34e29e3530 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/extractvalue-no-scalarization-required.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/extractvalue-no-scalarization-required.ll
@@ -16,7 +16,7 @@
 ; CM: vector.ph:
 ; CM: CLONE ir<%a> = extractvalue ir<%sv>
 ; CM: CLONE ir<%b> = extractvalue ir<%sv>
-; CM: WIDEN ir<%add> = add ir<%a>, ir<%b>
+; CM: CLONE ir<%add> = add ir<%a>, ir<%b>
 ; CM: Successor(s): vector loop
 ; CM: LV: Scalar loop costs: 5.
@@ -30,23 +30,22 @@ define void @test1(ptr %dst, {i64, i64} %sv) {
 ; FORCED-NEXT: br label %[[VECTOR_PH:.*]]
 ; FORCED: [[VECTOR_PH]]:
 ; FORCED-NEXT: [[TMP0:%.*]] = extractvalue { i64, i64 } [[SV]], 0
-; FORCED-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <2 x i64> poison, i64 [[TMP0]], i64 0
-; FORCED-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <2 x i64> [[BROADCAST_SPLATINSERT]], <2 x i64> poison, <2 x i32> zeroinitializer
 ; FORCED-NEXT: [[TMP4:%.*]] = extractvalue { i64, i64 } [[SV]], 1
-; FORCED-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <2 x i64> poison, i64 [[TMP4]], i64 0
+; FORCED-NEXT: [[TMP5:%.*]] = add i64 [[TMP0]], [[TMP4]]
+; FORCED-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <2 x i64> poison, i64 [[TMP5]], i64 0
 ; FORCED-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector <2 x i64> [[BROADCAST_SPLATINSERT1]], <2 x i64> poison, <2 x i32> zeroinitializer
-; FORCED-NEXT: [[TMP1:%.*]] = add <2 x i64> [[BROADCAST_SPLAT]], [[BROADCAST_SPLAT2]]
 ; FORCED-NEXT: br label %[[VECTOR_BODY:.*]]
 ; FORCED: [[VECTOR_BODY]]:
 ; FORCED-NEXT: [[INDEX:%.*]] = phi i32 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
 ; FORCED-NEXT: [[TMP2:%.*]] = getelementptr i64, ptr [[DST]], i32 [[INDEX]]
-; FORCED-NEXT: store <2 x i64> [[TMP1]], ptr [[TMP2]], align 4
+; FORCED-NEXT: store <2 x i64> [[BROADCAST_SPLAT2]], ptr [[TMP2]], align 4
 ; FORCED-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 2
 ; FORCED-NEXT: [[TMP3:%.*]] = icmp eq i32 [[INDEX_NEXT]], 1000
 ; FORCED-NEXT: br i1 [[TMP3]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
 ; FORCED: [[MIDDLE_BLOCK]]:
-; FORCED-NEXT: br [[EXIT:label %.*]]
-; FORCED: [[SCALAR_PH:.*:]]
+; FORCED-NEXT: br label %[[EXIT:.*]]
+; FORCED: [[EXIT]]:
+; FORCED-NEXT: ret void
 ;
 entry:
   br label %loop.body
@@ -99,10 +98,11 @@ define void @test_getVectorCallCost(ptr %dst, {float, float} %sv) {
 ; FORCED-NEXT: store <2 x float> [[TMP2]], ptr [[TMP1]], align 4
 ; FORCED-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 2
 ; FORCED-NEXT: [[TMP3:%.*]] = icmp eq i32 [[INDEX_NEXT]], 1000
-; FORCED-NEXT: br i1 [[TMP3]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
+; FORCED-NEXT: br i1 [[TMP3]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
 ; FORCED: [[MIDDLE_BLOCK]]:
-; FORCED-NEXT: br [[EXIT:label %.*]]
-; FORCED: [[SCALAR_PH:.*:]]
+; FORCED-NEXT: br label %[[EXIT:.*]]
+; FORCED: [[EXIT]]:
+; FORCED-NEXT: ret void
 ;
 entry:
   br label %loop.body
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/induction-costs-sve.ll b/llvm/test/Transforms/LoopVectorize/AArch64/induction-costs-sve.ll
index cfc6cc87a2a21..4b097ba2422e4 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/induction-costs-sve.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/induction-costs-sve.ll
@@ -271,69 +271,11 @@ define void @iv_trunc(i32 %x, ptr %dst, i64 %N) #0 {
 ;
 ; PRED-LABEL: define void @iv_trunc(
 ; PRED-SAME: i32 [[X:%.*]], ptr [[DST:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
-; PRED-NEXT: [[ENTRY:.*:]]
+; PRED-NEXT: [[ENTRY:.*]]:
 ; PRED-NEXT: [[MUL_X:%.*]] = add i32 [[X]], 1
-; PRED-NEXT: [[TMP0:%.*]] = add i64 [[N]], 1
-; PRED-NEXT: br label %[[VECTOR_SCEVCHECK:.*]]
-; PRED: [[VECTOR_SCEVCHECK]]:
-; PRED-NEXT: [[TMP1:%.*]] = sub i32 -1, [[X]]
-; PRED-NEXT: [[TMP2:%.*]] = icmp slt i32 [[MUL_X]], 0
-; PRED-NEXT: [[TMP3:%.*]] = select i1 [[TMP2]], i32 [[TMP1]], i32 [[MUL_X]]
-; PRED-NEXT: [[TMP4:%.*]] = trunc i64 [[N]] to i32
-; PRED-NEXT: [[MUL:%.*]] = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 [[TMP3]], i32 [[TMP4]])
-; PRED-NEXT: [[MUL_RESULT:%.*]] = extractvalue { i32, i1 } [[MUL]], 0
-; PRED-NEXT: [[MUL_OVERFLOW:%.*]] = extractvalue { i32, i1 } [[MUL]], 1
-; PRED-NEXT: [[TMP5:%.*]] = sub i32 0, [[MUL_RESULT]]
-; PRED-NEXT: [[TMP6:%.*]] = icmp ugt i32 [[TMP5]], 0
-; PRED-NEXT: [[TMP7:%.*]] = select i1 [[TMP2]], i1 [[TMP6]], i1 false
-; PRED-NEXT: [[TMP8:%.*]] = or i1 [[TMP7]], [[MUL_OVERFLOW]]
-; PRED-NEXT: [[TMP9:%.*]] = icmp ugt i64 [[N]], 4294967295
-; PRED-NEXT: [[TMP10:%.*]] = icmp ne i32 [[MUL_X]], 0
-; PRED-NEXT: [[TMP11:%.*]] = and i1 [[TMP9]], [[TMP10]]
-; PRED-NEXT: [[TMP12:%.*]] = or i1 [[TMP8]], [[TMP11]]
-; PRED-NEXT: br i1 [[TMP12]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
-; PRED: [[VECTOR_PH]]:
-; PRED-NEXT: [[TMP13:%.*]] = sub i64 [[TMP0]], 2
-; PRED-NEXT: [[TMP14:%.*]] = icmp ugt i64 [[TMP0]], 2
-; PRED-NEXT: [[TMP15:%.*]] = select i1 [[TMP14]], i64 [[TMP13]], i64 0
-; PRED-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call <2 x i1> @llvm.get.active.lane.mask.v2i1.i64(i64 0, i64 [[TMP0]])
-; PRED-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <2 x i32> poison, i32 [[MUL_X]], i64 0
-; PRED-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <2 x i32> [[BROADCAST_SPLATINSERT]], <2 x i32> poison, <2 x i32> zeroinitializer
-; PRED-NEXT: br label %[[VECTOR_BODY:.*]]
-; PRED: [[VECTOR_BODY]]:
-; PRED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[PRED_STORE_CONTINUE2:.*]] ]
-; PRED-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi <2 x i1> [ [[ACTIVE_LANE_MASK_ENTRY]], %[[VECTOR_PH]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], %[[PRED_STORE_CONTINUE2]] ]
-; PRED-NEXT: [[VEC_IND:%.*]] = phi <2 x i32> [ <i32 0, i32 1>, %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[PRED_STORE_CONTINUE2]] ]
-; PRED-NEXT: [[TMP16:%.*]] = mul <2 x i32> [[BROADCAST_SPLAT]], [[VEC_IND]]
-; PRED-NEXT: [[TMP17:%.*]] = zext <2 x i32> [[TMP16]] to <2 x i64>
-; PRED-NEXT: [[TMP18:%.*]] = extractelement <2 x i1> [[ACTIVE_LANE_MASK]], i32 0
-; PRED-NEXT: br i1 [[TMP18]], label %[[PRED_STORE_IF:.*]], label %[[PRED_STORE_CONTINUE:.*]]
-; PRED: [[PRED_STORE_IF]]:
-; PRED-NEXT: [[TMP19:%.*]] = extractelement <2 x i64> [[TMP17]], i32 0
-; PRED-NEXT: [[TMP20:%.*]] = getelementptr i32, ptr [[DST]], i64 [[TMP19]]
-; PRED-NEXT: store i32 1, ptr [[TMP20]], align 4
-; PRED-NEXT: br label %[[PRED_STORE_CONTINUE]]
-; PRED: [[PRED_STORE_CONTINUE]]:
-; PRED-NEXT: [[TMP21:%.*]] = extractelement <2 x i1> [[ACTIVE_LANE_MASK]], i32 1
-; PRED-NEXT: br i1 [[TMP21]], label %[[PRED_STORE_IF1:.*]], label %[[PRED_STORE_CONTINUE2]]
-; PRED: [[PRED_STORE_IF1]]:
-; PRED-NEXT: [[TMP22:%.*]] = extractelement <2 x i64> [[TMP17]], i32 1
-; PRED-NEXT: [[TMP23:%.*]] = getelementptr i32, ptr [[DST]], i64 [[TMP22]]
-; PRED-NEXT: store i32 1, ptr [[TMP23]], align 4
-; PRED-NEXT: br label %[[PRED_STORE_CONTINUE2]]
-; PRED: [[PRED_STORE_CONTINUE2]]:
-; PRED-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 2
-; PRED-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <2 x i1> @llvm.get.active.lane.mask.v2i1.i64(i64 [[INDEX]], i64 [[TMP15]])
-; PRED-NEXT: [[TMP24:%.*]] = extractelement <2 x i1> [[ACTIVE_LANE_MASK_NEXT]], i32 0
-; PRED-NEXT: [[TMP25:%.*]] = xor i1 [[TMP24]], true
-; PRED-NEXT: [[VEC_IND_NEXT]] = add <2 x i32> [[VEC_IND]], splat (i32 2)
-; PRED-NEXT: br i1 [[TMP25]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
-; PRED: [[MIDDLE_BLOCK]]:
-; PRED-NEXT: br label %[[EXIT:.*]]
-; PRED: [[SCALAR_PH]]:
 ; PRED-NEXT: br label %[[FOR_BODY:.*]]
 ; PRED: [[FOR_BODY]]:
-; PRED-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; PRED-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
 ; PRED-NEXT: [[TRUNC_IV:%.*]] = trunc i64 [[IV]] to i32
 ; PRED-NEXT: [[ADD_I:%.*]] = mul i32 [[MUL_X]], [[TRUNC_IV]]
 ; PRED-NEXT: [[IV_MUL:%.*]] = zext i32 [[ADD_I]] to i64
@@ -341,7 +283,7 @@ define void @iv_trunc(i32 %x, ptr %dst, i64 %N) #0 {
 ; PRED-NEXT: store i32 1, ptr [[GEP]], align 4
 ; PRED-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
 ; PRED-NEXT: [[EC:%.*]] = icmp eq i64 [[IV]], [[N]]
-; PRED-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[FOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
+; PRED-NEXT: br i1 [[EC]], label %[[EXIT:.*]], label %[[FOR_BODY]]
 ; PRED: [[EXIT]]:
 ; PRED-NEXT: ret void
 ;
@@ -437,101 +379,21 @@ define void @trunc_ivs_and_store(i32 %x, ptr %dst, i64 %N) #0 {
 ;
 ; PRED-LABEL: define void @trunc_ivs_and_store(
 ; PRED-SAME: i32 [[X:%.*]], ptr [[DST:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
-; PRED-NEXT: [[ENTRY:.*:]]
-; PRED-NEXT: [[MUL:%.*]] = mul i32 [[X]], [[X]]
-; PRED-NEXT: [[TMP0:%.*]] = add i64 [[N]], 1
-; PRED-NEXT: br label %[[VECTOR_SCEVCHECK:.*]]
-; PRED: [[VECTOR_SCEVCHECK]]:
+; PRED-NEXT: [[ENTRY:.*]]:
 ; PRED-NEXT: [[TMP1:%.*]] = mul i32 [[X]], [[X]]
-; PRED-NEXT: [[TMP2:%.*]] = sub i32 0, [[TMP1]]
-; PRED-NEXT: [[TMP3:%.*]] = icmp slt i32 [[MUL]], 0
-; PRED-NEXT: [[TMP4:%.*]] = select i1 [[TMP3]], i32 [[TMP2]], i32 [[MUL]]
-; PRED-NEXT: [[TMP5:%.*]] = trunc i64 [[N]] to i32
-; PRED-NEXT: [[MUL1:%.*]] = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 [[TMP4]], i32 [[TMP5]])
-; PRED-NEXT: [[MUL_RESULT:%.*]] = extractvalue { i32, i1 } [[MUL1]], 0
-; PRED-NEXT: [[MUL_OVERFLOW:%.*]] = extractvalue { i32, i1 } [[MUL1]], 1
-; PRED-NEXT: [[TMP6:%.*]] = sub i32 0, [[MUL_RESULT]]
-; PRED-NEXT: [[TMP7:%.*]] = icmp ugt i32 [[TMP6]], 0
-; PRED-NEXT: [[TMP8:%.*]] = select i1 [[TMP3]], i1 [[TMP7]], i1 false
-; PRED-NEXT: [[TMP9:%.*]] = or i1 [[TMP8]], [[MUL_OVERFLOW]]
-; PRED-NEXT: [[TMP10:%.*]] = icmp ugt i64 [[N]], 4294967295
-; PRED-NEXT: [[TMP11:%.*]] = icmp ne i32 [[MUL]], 0
-; PRED-NEXT: [[TMP12:%.*]] = and i1 [[TMP10]], [[TMP11]]
-; PRED-NEXT: [[TMP13:%.*]] = or i1 [[TMP9]], [[TMP12]]
-; PRED-NEXT: br i1 [[TMP13]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
-; PRED: [[VECTOR_PH]]:
-; PRED-NEXT: [[TMP14:%.*]] = sub i64 [[TMP0]], 4
-; PRED-NEXT: [[TMP15:%.*]] = icmp ugt i64 [[TMP0]], 4
-; PRED-NEXT: [[TMP16:%.*]] = select i1 [[TMP15]], i64 [[TMP14]], i64 0
-; PRED-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i64(i64 0, i64 [[TMP0]])
-; PRED-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[MUL]], i64 0
-; PRED-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer
-; PRED-NEXT: br label %[[VECTOR_BODY:.*]]
-; PRED: [[VECTOR_BODY]]:
-; PRED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[PRED_STORE_CONTINUE7:.*]] ]
-; PRED-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi <4 x i1> [ [[ACTIVE_LANE_MASK_ENTRY]], %[[VECTOR_PH]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], %[[PRED_STORE_CONTINUE7]] ]
-; PRED-NEXT: [[VEC_IND:%.*]] = phi <4 x i32> [ <i32 0, i32 1, i32 2, i32 3>, %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[PRED_STORE_CONTINUE7]] ]
-; PRED-NEXT: [[OFFSET_IDX:%.*]] = trunc i64 [[INDEX]] to i32
-; PRED-NEXT: [[TMP17:%.*]] = mul <4 x i32> [[BROADCAST_SPLAT]], [[VEC_IND]]
-; PRED-NEXT: [[TMP18:%.*]] = zext <4 x i32> [[TMP17]] to <4 x i64>
-; PRED-NEXT: [[TMP19:%.*]] = extractelement <4 x i1> [[ACTIVE_LANE_MASK]], i32 0
-; PRED-NEXT: br i1 [[TMP19]], label %[[PRED_STORE_IF:.*]], label %[[PRED_STORE_CONTINUE:.*]]
-; PRED: [[PRED_STORE_IF]]:
-; PRED-NEXT: [[TMP20:%.*]] = extractelement <4 x i64> [[TMP18]], i32 0
-; PRED-NEXT: [[TMP21:%.*]] = getelementptr i32, ptr [[DST]], i64 [[TMP20]]
-; PRED-NEXT: [[TMP22:%.*]] = add i32 [[OFFSET_IDX]], 0
-; PRED-NEXT: store i32 [[TMP22]], ptr [[TMP21]], align 4
-; PRED-NEXT: br label %[[PRED_STORE_CONTINUE]]
-; PRED: [[PRED_STORE_CONTINUE]]:
-; PRED-NEXT: [[TMP23:%.*]] = extractelement <4 x i1> [[ACTIVE_LANE_MASK]], i32 1
-; PRED-NEXT: br i1 [[TMP23]], label %[[PRED_STORE_IF2:.*]], label %[[PRED_STORE_CONTINUE3:.*]]
-; PRED: [[PRED_STORE_IF2]]:
-; PRED-NEXT: [[TMP24:%.*]] = extractelement <4 x i64> [[TMP18]], i32 1
-; PRED-NEXT: [[TMP25:%.*]] = getelementptr i32, ptr [[DST]], i64 [[TMP24]]
-; PRED-NEXT: [[TMP26:%.*]] = add i32 [[OFFSET_IDX]], 1
-; PRED-NEXT: store i32 [[TMP26]], ptr [[TMP25]], align 4
-; PRED-NEXT: br label %[[PRED_STORE_CONTINUE3]]
-; PRED: [[PRED_STORE_CONTINUE3]]:
-; PRED-NEXT: [[TMP27:%.*]] = extractelement <4 x i1> [[ACTIVE_LANE_MASK]], i32 2
-; PRED-NEXT: br i1 [[TMP27]], label %[[PRED_STORE_IF4:.*]], label %[[PRED_STORE_CONTINUE5:.*]]
-; PRED: [[PRED_STORE_IF4]]:
-; PRED-NEXT: [[TMP28:%.*]] = extractelement <4 x i64> [[TMP18]], i32 2
-; PRED-NEXT: [[TMP29:%.*]] = getelementptr i32, ptr [[DST]], i64 [[TMP28]]
-; PRED-NEXT: [[TMP30:%.*]] = add i32 [[OFFSET_IDX]], 2
-; PRED-NEXT: store i32 [[TMP30]], ptr [[TMP29]], align 4
-; PRED-NEXT: br label %[[PRED_STORE_CONTINUE5]]
-; PRED: [[PRED_STORE_CONTINUE5]]:
-; PRED-NEXT: [[TMP31:%.*]] = extractelement <4 x i1> [[ACTIVE_LANE_MASK]], i32 3
-; PRED-NEXT: br i1 [[TMP31]], label %[[PRED_STORE_IF6:.*]], label %[[PRED_STORE_CONTINUE7]]
-; PRED: [[PRED_STORE_IF6]]:
-; PRED-NEXT: [[TMP32:%.*]] = extractelement <4 x i64> [[TMP18]], i32 3
-; PRED-NEXT: [[TMP33:%.*]] = getelementptr i32, ptr [[DST]], i64 [[TMP32]]
-; PRED-NEXT: [[TMP34:%.*]] = add i32 [[OFFSET_IDX]], 3
-; PRED-NEXT: store i32 [[TMP34]], ptr [[TMP33]], align 4
-; PRED-NEXT: br label %[[PRED_STORE_CONTINUE7]]
-; PRED: [[PRED_STORE_CONTINUE7]]:
-; PRED-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 4
-; PRED-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i64(i64 [[INDEX]], i64 [[TMP16]])
-; PRED-NEXT: [[TMP35:%.*]] = extractelement <4 x i1> [[ACTIVE_LANE_MASK_NEXT]], i32 0
-; PRED-NEXT: [[TMP36:%.*]] = xor i1 [[TMP35]], true
-; PRED-NEXT: [[VEC_IND_NEXT]] = add <4 x i32> [[VEC_IND]], splat (i32 4)
-; PRED-NEXT: br i1 [[TMP36]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
-; PRED: [[MIDDLE_BLOCK]]:
-; PRED-NEXT: br label %[[EXIT:.*]]
-; PRED: [[SCALAR_PH]]:
 ; PRED-NEXT: br label %[[LOOP:.*]]
 ; PRED: [[LOOP]]:
-; PRED-NEXT: [[IV_1:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_1_NEXT:%.*]], %[[LOOP]] ]
-; PRED-NEXT: [[IV_2:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[IV_2_NEXT:%.*]], %[[LOOP]] ]
+; PRED-NEXT: [[IV_1:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_1_NEXT:%.*]], %[[LOOP]] ]
+; PRED-NEXT: [[IV_2:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ [[IV_2_NEXT:%.*]], %[[LOOP]] ]
 ; PRED-NEXT: [[IV_1_TRUNC:%.*]] = trunc i64 [[IV_1]] to i32
-; PRED-NEXT: [[IV_1_MUL:%.*]] = mul i32 [[MUL]], [[IV_1_TRUNC]]
+; PRED-NEXT: [[IV_1_MUL:%.*]] = mul i32 [[TMP1]], [[IV_1_TRUNC]]
 ; PRED-NEXT: [[IV_2_NEXT]] = add i32 [[IV_2]], 1
 ; PRED-NEXT: [[MUL_EXT:%.*]] = zext i32 [[IV_1_MUL]] to i64
 ; PRED-NEXT: [[GEP:%.*]] = getelementptr i32, ptr [[DST]], i64 [[MUL_EXT]]
 ; PRED-NEXT: store i32 [[IV_2]], ptr [[GEP]], align 4
 ; PRED-NEXT: [[IV_1_NEXT]] = add i64 [[IV_1]], 1
 ; PRED-NEXT: [[EXITCOND_3_NOT:%.*]] = icmp eq i64 [[IV_1]], [[N]]
-; PRED-NEXT: br i1 [[EXITCOND_3_NOT]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP7:![0-9]+]]
+; PRED-NEXT: br i1 [[EXITCOND_3_NOT]], label %[[EXIT:.*]], label %[[LOOP]]
 ; PRED: [[EXIT]]:
 ; PRED-NEXT: ret void
 ;
@@ -627,91 +489,12 @@ define void @ivs_trunc_and_ext(i32 %x, ptr %dst, i64 %N) #0 {
 ;
 ; PRED-LABEL: define void @ivs_trunc_and_ext(
 ; PRED-SAME: i32 [[X:%.*]], ptr [[DST:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
-; PRED-NEXT: [[ENTRY:.*:]]
+; PRED-NEXT: [[ENTRY:.*]]:
 ; PRED-NEXT: [[ADD:%.*]] = add i32 [[X]], 1
-; PRED-NEXT: [[TMP0:%.*]] = add i64 [[N]], 1
-; PRED-NEXT: br label %[[VECTOR_SCEVCHECK:.*]]
-; PRED: [[VECTOR_SCEVCHECK]]:
-; PRED-NEXT: [[TMP1:%.*]] = sub i32 -1, [[X]]
-; PRED-NEXT: [[TMP2:%.*]] = icmp slt i32 [[ADD]], 0
-; PRED-NEXT: [[TMP3:%.*]] = select i1 [[TMP2]], i32 [[TMP1]], i32 [[ADD]]
-; PRED-NEXT: [[TMP4:%.*]] = trunc i64 [[N]] to i32
-; PRED-NEXT: [[MUL:%.*]] = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 [[TMP3]], i32 [[TMP4]])
-; PRED-NEXT: [[MUL_RESULT:%.*]] = extractvalue { i32, i1 } [[MUL]], 0
-; PRED-NEXT: [[MUL_OVERFLOW:%.*]] = extractvalue { i32, i1 } [[MUL]], 1
-; PRED-NEXT: [[TMP5:%.*]] = sub i32 0, [[MUL_RESULT]]
-; PRED-NEXT: [[TMP6:%.*]] = icmp ugt i32 [[TMP5]], 0
-; PRED-NEXT: [[TMP7:%.*]] = select i1 [[TMP2]], i1 [[TMP6]], i1 false
-; PRED-NEXT: [[TMP8:%.*]] = or i1 [[TMP7]], [[MUL_OVERFLOW]]
-; PRED-NEXT: [[TMP9:%.*]] = icmp ugt i64 [[N]], 4294967295
-; PRED-NEXT: [[TMP10:%.*]] = icmp ne i32 [[ADD]], 0
-; PRED-NEXT: [[TMP11:%.*]] = and i1 [[TMP9]], [[TMP10]]
-; PRED-NEXT: [[TMP12:%.*]] = or i1 [[TMP8]], [[TMP11]]
-; PRED-NEXT: br i1 [[TMP12]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
-; PRED: [[VECTOR_PH]]:
-; PRED-NEXT: [[TMP13:%.*]] = sub i64 [[TMP0]], 4
-; PRED-NEXT: [[TMP14:%.*]] = icmp ugt i64 [[TMP0]], 4
-; PRED-NEXT: [[TMP15:%.*]] = select i1 [[TMP14]], i64 [[TMP13]], i64 0
-; PRED-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i64(i64 0, i64 [[TMP0]])
-; PRED-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[ADD]], i64 0
-; PRED-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer
-; PRED-NEXT: br label %[[VECTOR_BODY:.*]]
-; PRED: [[VECTOR_BODY]]:
-; PRED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[PRED_STORE_CONTINUE6:.*]] ]
-; PRED-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi <4 x i1> [ [[ACTIVE_LANE_MASK_ENTRY]], %[[VECTOR_PH]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], %[[PRED_STORE_CONTINUE6]] ]
-; PRED-NEXT: [[VEC_IND:%.*]] = phi <4 x i32> [ <i32 0, i32 1, i32 2, i32 3>, %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[PRED_STORE_CONTINUE6]] ]
-; PRED-NEXT: [[OFFSET_IDX:%.*]] = trunc i64 [[INDEX]] to i32
-; PRED-NEXT: [[TMP16:%.*]] = mul <4 x i32> [[BROADCAST_SPLAT]], [[VEC_IND]]
-; PRED-NEXT: [[TMP17:%.*]] = zext <4 x i32> [[TMP16]] to <4 x i64>
-; PRED-NEXT: [[TMP18:%.*]] = extractelement <4 x i1> [[ACTIVE_LANE_MASK]], i32 0
-; PRED-NEXT: br i1 [[TMP18]], label %[[PRED_STORE_IF:.*]], label %[[PRED_STORE_CONTINUE:.*]]
-; PRED: [[PRED_STORE_IF]]:
-; PRED-NEXT: [[TMP19:%.*]] = extractelement <4 x i64> [[TMP17]], i32 0
-; PRED-NEXT: [[TMP20:%.*]] = getelementptr i32, ptr [[DST]], i64 [[TMP19]]
-; PRED-NEXT: [[TMP21:%.*]] = add i32 [[OFFSET_IDX]], 0
-; PRED-NEXT: store i32 [[TMP21]], ptr [[TMP20]], align 4
-; PRED-NEXT: br label %[[PRED_STORE_CONTINUE]]
-; PRED: [[PRED_STORE_CONTINUE]]:
-; PRED-NEXT: [[TMP22:%.*]] = extractelement <4 x i1> [[ACTIVE_LANE_MASK]], i32 1
-; PRED-NEXT: br i1 [[TMP22]], label %[[PRED_STORE_IF1:.*]], label %[[PRED_STORE_CONTINUE2:.*]]
-; PRED: [[PRED_STORE_IF1]]:
-; PRED-NEXT: [[TMP23:%.*]] = extractelement <4 x i64> [[TMP17]], i32 1
-; PRED-NEXT: [[TMP24:%.*]] = getelementptr i32, ptr [[DST]], i64 [[TMP23]]
-; PRED-NEXT: [[TMP25:%.*]] = add i32 [[OFFSET_IDX]], 1
-; PRED-NEXT: store i32 [[TMP25]], ptr [[TMP24]], align 4
-; PRED-NEXT: br label %[[PRED_STORE_CONTINUE2]]
-; PRED: [[PRED_STORE_CONTINUE2]]:
-; PRED-NEXT: [[TMP26:%.*]] = extractelement <4 x i1> [[ACTIVE_LANE_MASK]], i32 2
-; PRED-NEXT: br i1 [[TMP26]], label %[[PRED_STORE_IF3:.*]], label %[[PRED_STORE_CONTINUE4:.*]]
-; PRED: [[PRED_STORE_IF3]]:
-; PRED-NEXT: [[TMP27:%.*]] = extractelement <4 x i64> [[TMP17]], i32 2
-; PRED-NEXT: [[TMP28:%.*]] = getelementptr i32, ptr [[DST]], i64 [[TMP27]]
-; PRED-NEXT: [[TMP29:%.*]] = add i32 [[OFFSET_IDX]], 2
-; PRED-NEXT: store i32 [[TMP29]], ptr [[TMP28]], align 4
-; PRED-NEXT: br label %[[PRED_STORE_CONTINUE4]]
-; PRED: [[PRED_STORE_CONTINUE4]]:
-; PRED-NEXT: [[TMP30:%.*]] = extractelement <4 x i1> [[ACTIVE_LANE_MASK]], i32 3
-; PRED-NEXT: br i1 [[TMP30]], label %[[PRED_STORE_IF5:.*]], label %[[PRED_STORE_CONTINUE6]]
-; PRED: [[PRED_STORE_IF5]]:
-; PRED-NEXT: [[TMP31:%.*]] = extractelement <4 x i64> [[TMP17]], i32 3
-; PRED-NEXT: [[TMP32:%.*]] = getelementptr i32, ptr [[DST]], i64 [[TMP31]]
-; PRED-NEXT: [[TMP33:%.*]] = add i32 [[OFFSET_IDX]], 3
-; PRED-NEXT: store i32 [[TMP33]], ptr [[TMP32]], align 4
-; PRED-NEXT: br label %[[PRED_STORE_CONTINUE6]]
-; PRED: [[PRED_STORE_CONTINUE6]]:
-; PRED-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 4
-; PRED-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i64(i64 [[INDEX]], i64 [[TMP15]])
-; PRED-NEXT: [[TMP34:%.*]] = extractelement <4 x i1> [[ACTIVE_LANE_MASK_NEXT]], i32 0
-; PRED-NEXT: [[TMP35:%.*]] = xor i1 [[TMP34]], true
-; PRED-NEXT: [[VEC_IND_NEXT]] = add <4 x i32> [[VEC_IND]], splat (i32 4)
-; PRED-NEXT: br i1 [[TMP35]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
-; PRED: [[MIDDLE_BLOCK]]:
-; PRED-NEXT: br label %[[EXIT:.*]]
-; PRED: [[SCALAR_PH]]:
 ; PRED-NEXT: br label %[[LOOP:.*]]
 ; PRED: [[LOOP]]:
-; PRED-NEXT: [[IV_1:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_1_NEXT:%.*]], %[[LOOP]] ]
-; PRED-NEXT: [[IV_2:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[IV_2_NEXT:%.*]], %[[LOOP]] ]
+; PRED-NEXT: [[IV_1:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_1_NEXT:%.*]], %[[LOOP]] ]
+; PRED-NEXT: [[IV_2:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ [[IV_2_NEXT:%.*]], %[[LOOP]] ]
 ; PRED-NEXT: [[IV_TRUNC:%.*]] = trunc i64 [[IV_1]] to i32
 ; PRED-NEXT: [[IV_MUL:%.*]] = mul i32 [[ADD]], [[IV_TRUNC]]
 ; PRED-NEXT: [[IV_2_NEXT]] = add i32 [[IV_2]], 1
@@ -720,7 +503,7 @@ define void @ivs_trunc_and_ext(i32 %x, ptr %dst, i64 %N) #0 {
 ; PRED-NEXT: store i32 [[IV_2]], ptr [[GEP]], align 4
 ; PRED-NEXT: [[IV_1_NEXT]] = add i64 [[IV_1]], 1
 ; PRED-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_1]], [[N]]
-; PRED-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP9:![0-9]+]]
+; PRED-NEXT: br i1 [[EC]], label %[[EXIT:.*]], label %[[LOOP]]
 ; PRED: [[EXIT]]:
 ; PRED-NEXT: ret void
 ;
@@ -842,7 +625,7 @@ define void @exit_cond_zext_iv(ptr %dst, i64 %N) {
 ; PRED: [[PRED_STORE_CONTINUE5]]:
 ; PRED-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 2
 ; PRED-NEXT: [[TMP13:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; PRED-NEXT: br i1 [[TMP13]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
+; PRED-NEXT: br i1 [[TMP13]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
 ; PRED: [[MIDDLE_BLOCK]]:
 ; PRED-NEXT: br label %[[EXIT:.*]]
 ; PRED: [[SCALAR_PH]]:
@@ -855,7 +638,7 @@ define void @exit_cond_zext_iv(ptr %dst, i64 %N) {
 ; PRED-NEXT: [[IV_1_NEXT]] = add i32 [[IV_1]], 1
 ; PRED-NEXT: [[IV_EXT]] = zext i32 [[IV_1_NEXT]] to i64
 ; PRED-NEXT: [[C:%.*]] = icmp ult i64 [[IV_EXT]], [[N]]
-; PRED-NEXT: br i1 [[C]], label %[[LOOP]], label %[[EXIT]], !llvm.loop [[LOOP11:![0-9]+]]
+; PRED-NEXT: br i1 [[C]], label %[[LOOP]], label %[[EXIT]], !llvm.loop [[LOOP5:![0-9]+]]
 ; PRED: [[EXIT]]:
 ; PRED-NEXT: ret void
 ;
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-widen-extractvalue.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-widen-extractvalue.ll
index 0c6a490ddf4ba..eceda0897b174 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-widen-extractvalue.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-widen-extractvalue.ll
@@ -17,17 +17,15 @@ define void @widen_extractvalue(ptr %dst, {i64, i64} %sv) #0 {
 ; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i32 1000, [[TMP3]]
 ; CHECK-NEXT: [[N_VEC:%.*]] = sub i32 1000, [[N_MOD_VF]]
 ; CHECK-NEXT: [[EXTRACT0:%.*]] = extractvalue { i64, i64 } [[SV]], 0
-; CHECK-NEXT: [[DOTSPLATINSERT1:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[EXTRACT0]], i64 0
-; CHECK-NEXT: [[DOTSPLAT2:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT1]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
 ; CHECK-NEXT: [[TMP10:%.*]] = extractvalue { i64, i64 } [[SV]], 1
-; CHECK-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[TMP10]], i64 0
+; CHECK-NEXT: [[TMP6:%.*]] = add i64 [[EXTRACT0]], [[TMP10]]
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[TMP6]], i64 0
 ; CHECK-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector <vscale x 2 x i64> [[BROADCAST_SPLATINSERT1]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-; CHECK-NEXT: [[TMP7:%.*]] = add <vscale x 2 x i64> [[DOTSPLAT2]], [[BROADCAST_SPLAT2]]
 ; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
 ; CHECK: [[VECTOR_BODY]]:
 ; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
 ; CHECK-NEXT: [[TMP8:%.*]] = getelementptr i64, ptr [[DST]], i32 [[INDEX]]
-; CHECK-NEXT: store <vscale x 2 x i64> [[TMP7]], ptr [[TMP8]], align 8
+; CHECK-NEXT: store <vscale x 2 x i64> [[BROADCAST_SPLAT2]], ptr [[TMP8]], align 8
 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], [[TMP3]]
 ; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
 ; CHECK-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
diff --git a/llvm/test/Transforms/LoopVectorize/ARM/optsize_minsize.ll b/llvm/test/Transforms/LoopVectorize/ARM/optsize_minsize.ll
index 6ea075f76aed4..83be0708774f1 100644
--- a/llvm/test/Transforms/LoopVectorize/ARM/optsize_minsize.ll
+++ b/llvm/test/Transforms/LoopVectorize/ARM/optsize_minsize.ll
@@ -181,178 +181,23 @@ for.cond.cleanup:
 define void @tail_predicate_without_optsize(ptr %p, i8 %a, i8 %b, i8 %c, i32 %n) {
 ; DEFAULT-LABEL: define void @tail_predicate_without_optsize(
 ; DEFAULT-SAME: ptr [[P:%.*]], i8 [[A:%.*]], i8 [[B:%.*]], i8 [[C:%.*]], i32 [[N:%.*]]) {
-; DEFAULT-NEXT: [[ENTRY:.*:]]
-; DEFAULT-NEXT: br label %[[VECTOR_PH:.*]]
-; DEFAULT: [[VECTOR_PH]]:
-; DEFAULT-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <16 x i8> poison, i8 [[A]], i64 0
-; DEFAULT-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <16 x i8> [[BROADCAST_SPLATINSERT]], <16 x i8> poison, <16 x i32> zeroinitializer
-; DEFAULT-NEXT: [[BROADCAST_SPLATINSERT3:%.*]] = insertelement <16 x i8> poison, i8 [[B]], i64 0
-; DEFAULT-NEXT: [[BROADCAST_SPLAT4:%.*]] = shufflevector <16 x i8> [[BROADCAST_SPLATINSERT3]], <16 x i8> poison, <16 x i32> zeroinitializer
-; DEFAULT-NEXT: [[BROADCAST_SPLATINSERT5:%.*]] = insertelement <16 x i8> poison, i8 [[C]], i64 0
-; DEFAULT-NEXT: [[BROADCAST_SPLAT6:%.*]] = shufflevector <16 x i8> [[BROADCAST_SPLATINSERT5]], <16 x i8> poison, <16 x i32> zeroinitializer
-; DEFAULT-NEXT: br label %[[VECTOR_BODY:.*]]
-; DEFAULT: [[VECTOR_BODY]]:
-; DEFAULT-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[PRED_STORE_CONTINUE35:.*]] ]
-; DEFAULT-NEXT: [[VEC_IND:%.*]] = phi <16 x i8> [ <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[PRED_STORE_CONTINUE35]] ]
-; DEFAULT-NEXT: [[VEC_IND1:%.*]] = phi <16 x i8> [ <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT2:%.*]], %[[PRED_STORE_CONTINUE35]] ]
-; DEFAULT-NEXT: [[TMP0:%.*]] = icmp ule <16 x i8> [[VEC_IND]], splat (i8 14)
-; DEFAULT-NEXT: [[TMP1:%.*]] = mul <16 x i8> [[BROADCAST_SPLAT]], [[VEC_IND1]]
-; DEFAULT-NEXT: [[TMP2:%.*]] = lshr <16 x i8> [[VEC_IND1]], splat (i8 1)
-; DEFAULT-NEXT: [[TMP3:%.*]] = mul <16 x i8> [[TMP2]], [[BROADCAST_SPLAT4]]
-; DEFAULT-NEXT: [[TMP4:%.*]] = add <16 x i8> [[TMP3]], [[TMP1]]
-; DEFAULT-NEXT: [[TMP5:%.*]] = lshr <16 x i8> [[VEC_IND1]], splat (i8 2)
-; DEFAULT-NEXT: [[TMP6:%.*]] = mul <16 x i8> [[TMP5]], [[BROADCAST_SPLAT6]]
-; DEFAULT-NEXT: [[TMP7:%.*]] = add <16 x i8> [[TMP4]], [[TMP6]]
-; DEFAULT-NEXT: [[TMP8:%.*]] = extractelement <16 x i1> [[TMP0]], i32 0
-; DEFAULT-NEXT: br i1 [[TMP8]], label %[[PRED_STORE_IF:.*]], label %[[PRED_STORE_CONTINUE:.*]]
-; DEFAULT: [[PRED_STORE_IF]]:
-; DEFAULT-NEXT: [[TMP9:%.*]] = add i64 [[INDEX]], 0
-; DEFAULT-NEXT: [[TMP10:%.*]] = getelementptr inbounds i8, ptr [[P]], i64 [[TMP9]]
-; DEFAULT-NEXT: [[TMP11:%.*]] = extractelement <16 x i8> [[TMP7]], i32 0
-; DEFAULT-NEXT: store i8 [[TMP11]], ptr [[TMP10]], align 1
-; DEFAULT-NEXT: br label %[[PRED_STORE_CONTINUE]]
-; DEFAULT: [[PRED_STORE_CONTINUE]]:
-; DEFAULT-NEXT: [[TMP12:%.*]] = extractelement <16 x i1> [[TMP0]], i32 1
-; DEFAULT-NEXT: br i1 [[TMP12]], label %[[PRED_STORE_IF6:.*]], label %[[PRED_STORE_CONTINUE7:.*]]
-; DEFAULT: [[PRED_STORE_IF6]]:
-; DEFAULT-NEXT: [[TMP13:%.*]] = add i64 [[INDEX]], 1
-; DEFAULT-NEXT: [[TMP14:%.*]] = getelementptr inbounds i8, ptr [[P]], i64 [[TMP13]]
-; DEFAULT-NEXT: [[TMP15:%.*]] = extractelement <16 x i8> [[TMP7]], i32 1
-; DEFAULT-NEXT: store i8 [[TMP15]], ptr [[TMP14]], align 1
-; DEFAULT-NEXT: br label %[[PRED_STORE_CONTINUE7]]
-; DEFAULT: [[PRED_STORE_CONTINUE7]]:
-; DEFAULT-NEXT: [[TMP16:%.*]] = extractelement <16 x i1> [[TMP0]], i32 2
-; DEFAULT-NEXT: br i1 [[TMP16]], label %[[PRED_STORE_IF8:.*]], label %[[PRED_STORE_CONTINUE9:.*]]
-; DEFAULT: [[PRED_STORE_IF8]]:
-; DEFAULT-NEXT: [[TMP17:%.*]] = add i64 [[INDEX]], 2
-; DEFAULT-NEXT: [[TMP18:%.*]] = getelementptr inbounds i8, ptr [[P]], i64 [[TMP17]]
-; DEFAULT-NEXT: [[TMP19:%.*]] = extractelement <16 x i8> [[TMP7]], i32 2
-; DEFAULT-NEXT: store i8 [[TMP19]], ptr [[TMP18]], align 1
-; DEFAULT-NEXT: br label %[[PRED_STORE_CONTINUE9]]
-; DEFAULT: [[PRED_STORE_CONTINUE9]]:
-; DEFAULT-NEXT: [[TMP20:%.*]] = extractelement <16 x i1> [[TMP0]], i32 3
-; DEFAULT-NEXT: br i1 [[TMP20]], label %[[PRED_STORE_IF10:.*]], label %[[PRED_STORE_CONTINUE11:.*]]
-; DEFAULT: [[PRED_STORE_IF10]]:
-; DEFAULT-NEXT: [[TMP21:%.*]] = add i64 [[INDEX]], 3
-; DEFAULT-NEXT: [[TMP22:%.*]] = getelementptr inbounds i8, ptr [[P]], i64 [[TMP21]]
-; DEFAULT-NEXT: [[TMP23:%.*]] = extractelement <16 x i8> [[TMP7]], i32 3
-; DEFAULT-NEXT: store i8 [[TMP23]], ptr [[TMP22]], align 1
-; DEFAULT-NEXT: br label %[[PRED_STORE_CONTINUE11]]
-; DEFAULT: [[PRED_STORE_CONTINUE11]]:
-; DEFAULT-NEXT: [[TMP24:%.*]] = extractelement <16 x i1> [[TMP0]], i32 4
-; DEFAULT-NEXT: br i1 [[TMP24]], label %[[PRED_STORE_IF12:.*]], label %[[PRED_STORE_CONTINUE13:.*]]
-; DEFAULT: [[PRED_STORE_IF12]]:
-; DEFAULT-NEXT: [[TMP25:%.*]] = add i64 [[INDEX]], 4
-; DEFAULT-NEXT: [[TMP26:%.*]] = getelementptr inbounds i8, ptr [[P]], i64 [[TMP25]]
-; DEFAULT-NEXT: [[TMP27:%.*]] = extractelement <16 x i8> [[TMP7]], i32 4
-; DEFAULT-NEXT: store i8 [[TMP27]], ptr [[TMP26]], align 1
-; DEFAULT-NEXT: br label %[[PRED_STORE_CONTINUE13]]
-; DEFAULT: [[PRED_STORE_CONTINUE13]]:
-; DEFAULT-NEXT: [[TMP28:%.*]] = extractelement <16 x i1> [[TMP0]], i32 5
-; DEFAULT-NEXT: br i1 [[TMP28]], label %[[PRED_STORE_IF14:.*]], label %[[PRED_STORE_CONTINUE15:.*]]
-; DEFAULT: [[PRED_STORE_IF14]]:
-; DEFAULT-NEXT: [[TMP29:%.*]] = add i64 [[INDEX]], 5
-; DEFAULT-NEXT: [[TMP30:%.*]] = getelementptr inbounds i8, ptr [[P]], i64 [[TMP29]]
-; DEFAULT-NEXT: [[TMP31:%.*]] = extractelement <16 x i8> [[TMP7]], i32 5
-; DEFAULT-NEXT: store i8 [[TMP31]], ptr [[TMP30]], align 1
-; DEFAULT-NEXT: br label %[[PRED_STORE_CONTINUE15]]
-; DEFAULT: [[PRED_STORE_CONTINUE15]]:
-; DEFAULT-NEXT: [[TMP32:%.*]] = extractelement <16 x i1> [[TMP0]], i32 6
-; DEFAULT-NEXT: br i1 [[TMP32]], label %[[PRED_STORE_IF16:.*]], label %[[PRED_STORE_CONTINUE17:.*]]
-; DEFAULT: [[PRED_STORE_IF16]]:
-; DEFAULT-NEXT: [[TMP33:%.*]] = add i64 [[INDEX]], 6
-; DEFAULT-NEXT: [[TMP34:%.*]] = getelementptr inbounds i8, ptr [[P]], i64 [[TMP33]]
-; DEFAULT-NEXT: [[TMP35:%.*]] = extractelement <16 x i8> [[TMP7]], i32 6
-; DEFAULT-NEXT: store i8 [[TMP35]], ptr [[TMP34]], align 1
-; DEFAULT-NEXT: br label %[[PRED_STORE_CONTINUE17]]
-; DEFAULT: [[PRED_STORE_CONTINUE17]]:
-; DEFAULT-NEXT: [[TMP36:%.*]] = extractelement <16 x i1> [[TMP0]], i32 7
-; DEFAULT-NEXT: br i1 [[TMP36]], label %[[PRED_STORE_IF18:.*]], label %[[PRED_STORE_CONTINUE19:.*]]
-; DEFAULT: [[PRED_STORE_IF18]]:
-; DEFAULT-NEXT: [[TMP37:%.*]] = add i64 [[INDEX]], 7
-; DEFAULT-NEXT: [[TMP38:%.*]] = getelementptr inbounds i8, ptr [[P]], i64 [[TMP37]]
-; DEFAULT-NEXT: [[TMP39:%.*]] = extractelement <16 x i8> [[TMP7]], i32 7
-; DEFAULT-NEXT: store i8 [[TMP39]], ptr [[TMP38]], align 1
-; DEFAULT-NEXT: br label %[[PRED_STORE_CONTINUE19]]
-; DEFAULT: [[PRED_STORE_CONTINUE19]]:
-; DEFAULT-NEXT: [[TMP40:%.*]] = extractelement <16 x i1> [[TMP0]], i32 8
-; DEFAULT-NEXT: br i1 [[TMP40]], label %[[PRED_STORE_IF20:.*]], label %[[PRED_STORE_CONTINUE21:.*]]
-; DEFAULT: [[PRED_STORE_IF20]]:
-; DEFAULT-NEXT: [[TMP41:%.*]] = add i64 [[INDEX]], 8
-; DEFAULT-NEXT: [[TMP42:%.*]] = getelementptr inbounds i8, ptr [[P]], i64 [[TMP41]]
-; DEFAULT-NEXT: [[TMP43:%.*]] = extractelement <16 x i8> [[TMP7]], i32 8
-; DEFAULT-NEXT: store i8 [[TMP43]], ptr [[TMP42]], align 1
-; DEFAULT-NEXT: br label %[[PRED_STORE_CONTINUE21]]
-; DEFAULT: [[PRED_STORE_CONTINUE21]]:
-; DEFAULT-NEXT: [[TMP44:%.*]] = extractelement <16 x i1> [[TMP0]], i32 9
-; DEFAULT-NEXT: br i1 [[TMP44]], label %[[PRED_STORE_IF22:.*]], label %[[PRED_STORE_CONTINUE23:.*]]
-; DEFAULT: [[PRED_STORE_IF22]]:
-; DEFAULT-NEXT: [[TMP45:%.*]] = add i64 [[INDEX]], 9
-; DEFAULT-NEXT: [[TMP46:%.*]] = getelementptr inbounds i8, ptr [[P]], i64 [[TMP45]]
-; DEFAULT-NEXT: [[TMP47:%.*]] = extractelement <16 x i8> [[TMP7]], i32 9
-; DEFAULT-NEXT: store i8 [[TMP47]], ptr [[TMP46]], align 1
-; DEFAULT-NEXT: br label %[[PRED_STORE_CONTINUE23]]
-; DEFAULT: [[PRED_STORE_CONTINUE23]]:
-; DEFAULT-NEXT: [[TMP48:%.*]] = extractelement <16 x i1> [[TMP0]], i32 10
-; DEFAULT-NEXT: br i1 [[TMP48]], label %[[PRED_STORE_IF24:.*]], label %[[PRED_STORE_CONTINUE25:.*]]
-; DEFAULT: [[PRED_STORE_IF24]]:
-; DEFAULT-NEXT: [[TMP49:%.*]] = add i64 [[INDEX]], 10
-; DEFAULT-NEXT: [[TMP50:%.*]] = getelementptr inbounds i8, ptr [[P]], i64 [[TMP49]]
-; DEFAULT-NEXT: [[TMP51:%.*]] = extractelement <16 x i8> [[TMP7]], i32 10
-; DEFAULT-NEXT: store i8 [[TMP51]], ptr [[TMP50]], align 1
-; DEFAULT-NEXT: br label %[[PRED_STORE_CONTINUE25]]
-; DEFAULT: [[PRED_STORE_CONTINUE25]]:
-; DEFAULT-NEXT: [[TMP52:%.*]] = extractelement <16 x i1> [[TMP0]], i32 11
-; DEFAULT-NEXT: br i1 [[TMP52]], label %[[PRED_STORE_IF26:.*]], label %[[PRED_STORE_CONTINUE27:.*]]
-; DEFAULT: [[PRED_STORE_IF26]]:
-; DEFAULT-NEXT: [[TMP53:%.*]] = add i64 [[INDEX]], 11
-; DEFAULT-NEXT: [[TMP54:%.*]] = getelementptr inbounds i8, ptr [[P]], i64 [[TMP53]]
-; DEFAULT-NEXT: [[TMP55:%.*]] = extractelement <16 x i8> [[TMP7]], i32 11
-; DEFAULT-NEXT: store i8 [[TMP55]], ptr [[TMP54]], align 1
-; DEFAULT-NEXT: br label %[[PRED_STORE_CONTINUE27]]
-; DEFAULT: [[PRED_STORE_CONTINUE27]]:
-; DEFAULT-NEXT: [[TMP56:%.*]] = extractelement <16 x i1> [[TMP0]], i32 12
-; DEFAULT-NEXT: br i1 [[TMP56]], label %[[PRED_STORE_IF28:.*]], label %[[PRED_STORE_CONTINUE29:.*]]
-; DEFAULT: [[PRED_STORE_IF28]]:
-; DEFAULT-NEXT: [[TMP57:%.*]] = add i64 [[INDEX]], 12
-; DEFAULT-NEXT: [[TMP58:%.*]] = getelementptr inbounds i8, ptr [[P]], i64 [[TMP57]]
-; DEFAULT-NEXT: [[TMP59:%.*]] = extractelement <16 x i8> [[TMP7]], i32 12
-; DEFAULT-NEXT: store i8 [[TMP59]], ptr [[TMP58]], align 1
-; DEFAULT-NEXT: br label %[[PRED_STORE_CONTINUE29]]
-; DEFAULT: [[PRED_STORE_CONTINUE29]]:
-; DEFAULT-NEXT: [[TMP60:%.*]] = extractelement <16 x i1> [[TMP0]], i32 13
-; DEFAULT-NEXT: br i1 [[TMP60]], label %[[PRED_STORE_IF30:.*]], label %[[PRED_STORE_CONTINUE31:.*]]
-; DEFAULT: [[PRED_STORE_IF30]]:
-; DEFAULT-NEXT: [[TMP61:%.*]] = add i64 [[INDEX]], 13
-; DEFAULT-NEXT: [[TMP62:%.*]] = getelementptr inbounds i8, ptr [[P]], i64 [[TMP61]]
-; DEFAULT-NEXT: [[TMP63:%.*]] = extractelement <16 x i8> [[TMP7]], i32 13
-; DEFAULT-NEXT: store i8 [[TMP63]], ptr [[TMP62]], align 1
-; DEFAULT-NEXT: br label %[[PRED_STORE_CONTINUE31]]
-; DEFAULT: [[PRED_STORE_CONTINUE31]]:
-; DEFAULT-NEXT: [[TMP64:%.*]] = extractelement <16 x i1> [[TMP0]], i32 14
-; DEFAULT-NEXT: br i1 [[TMP64]], label %[[PRED_STORE_IF32:.*]], label %[[PRED_STORE_CONTINUE33:.*]]
-; DEFAULT: [[PRED_STORE_IF32]]:
-; DEFAULT-NEXT: [[TMP65:%.*]] = add i64 [[INDEX]], 14
-; DEFAULT-NEXT: [[TMP66:%.*]] = getelementptr inbounds i8, ptr [[P]], i64 [[TMP65]]
-; DEFAULT-NEXT: [[TMP67:%.*]] = extractelement <16 x i8> [[TMP7]], i32 14
-; DEFAULT-NEXT: store i8 [[TMP67]], ptr [[TMP66]], align 1
-; DEFAULT-NEXT: br label %[[PRED_STORE_CONTINUE33]]
-; DEFAULT: [[PRED_STORE_CONTINUE33]]:
-; DEFAULT-NEXT: [[TMP68:%.*]] = extractelement <16 x i1> [[TMP0]], i32 15
-; DEFAULT-NEXT: br i1 [[TMP68]], label %[[PRED_STORE_IF34:.*]], label %[[PRED_STORE_CONTINUE35]]
-; DEFAULT: [[PRED_STORE_IF34]]:
-; DEFAULT-NEXT: [[TMP69:%.*]] = add i64 [[INDEX]], 15
+; DEFAULT-NEXT: [[ENTRY:.*]]:
+; DEFAULT-NEXT: br label %[[FOR_BODY:.*]]
+; DEFAULT: [[FOR_BODY]]:
+; DEFAULT-NEXT: [[TMP69:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; DEFAULT-NEXT: [[TMP0:%.*]] = trunc nuw nsw i64 [[TMP69]] to i8
+; DEFAULT-NEXT: [[MUL:%.*]] = mul i8 [[A]], [[TMP0]]
+; DEFAULT-NEXT: [[SHR:%.*]] = lshr i8 [[TMP0]], 1
+; DEFAULT-NEXT: [[MUL5:%.*]] = mul i8 [[SHR]], [[B]]
+; DEFAULT-NEXT: [[ADD:%.*]] = add i8 [[MUL5]], [[MUL]]
+; DEFAULT-NEXT: [[SHR7:%.*]] = lshr i8 [[TMP0]], 2
+; DEFAULT-NEXT: [[MUL9:%.*]] = mul i8 [[SHR7]], [[C]]
+; DEFAULT-NEXT: [[TMP71:%.*]] = add i8 [[ADD]], [[MUL9]]
 ; DEFAULT-NEXT: [[TMP70:%.*]] = getelementptr inbounds i8, ptr [[P]], i64 [[TMP69]]
-; DEFAULT-NEXT: [[TMP71:%.*]] = extractelement <16 x i8> [[TMP7]], i32 15
 ; DEFAULT-NEXT: store i8 [[TMP71]], ptr [[TMP70]], align 1
-; DEFAULT-NEXT: br label %[[PRED_STORE_CONTINUE35]]
-; DEFAULT: [[PRED_STORE_CONTINUE35]]:
-; DEFAULT-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
-; DEFAULT-NEXT: [[VEC_IND_NEXT]] = add <16 x i8> [[VEC_IND]], splat (i8 16)
-; DEFAULT-NEXT: [[VEC_IND_NEXT2]] = add <16 x i8> [[VEC_IND1]], splat (i8 16)
-; DEFAULT-NEXT: br i1 true, label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
-; DEFAULT: [[MIDDLE_BLOCK]]:
-; DEFAULT-NEXT: br label %[[FOR_COND_CLEANUP:.*]]
+; DEFAULT-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[TMP69]], 1
+; DEFAULT-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 15
+; DEFAULT-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_COND_CLEANUP:.*]], label %[[FOR_BODY]]
 ; DEFAULT: [[FOR_COND_CLEANUP]]:
 ; DEFAULT-NEXT: ret void
 ;
@@ -449,7 +294,7 @@ define void @dont_vectorize_with_minsize() {
 ; DEFAULT-NEXT: store <4 x i16> [[TMP11]], ptr [[TMP9]], align 2
 ; DEFAULT-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP0]], 4
 ; DEFAULT-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_NEXT]], 64
-; DEFAULT-NEXT: br i1 [[TMP16]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
+; DEFAULT-NEXT: br i1 [[TMP16]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
 ; DEFAULT: [[MIDDLE_BLOCK]]:
 ; DEFAULT-NEXT: br label %[[FOR_COND_CLEANUP:.*]]
 ; DEFAULT: [[FOR_COND_CLEANUP]]:
@@ -555,7 +400,7 @@ define void @vectorization_forced() {
 ; DEFAULT-NEXT: store <4 x i16> [[TMP11]], ptr [[TMP9]], align 2
 ; DEFAULT-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP0]], 4
 ; DEFAULT-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_NEXT]], 64
-; DEFAULT-NEXT: br i1 [[TMP16]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
+; DEFAULT-NEXT: br i1 [[TMP16]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
 ; DEFAULT: [[MIDDLE_BLOCK]]:
 ; DEFAULT-NEXT: br label %[[FOR_COND_CLEANUP:.*]]
 ; DEFAULT: [[FOR_COND_CLEANUP]]:
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/dead-ops-cost.ll b/llvm/test/Transforms/LoopVectorize/RISCV/dead-ops-cost.ll
index f25b86d3b20c2..b81637f50989d 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/dead-ops-cost.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/dead-ops-cost.ll
@@ -293,9 +293,9 @@ define void @test_phi_in_latch_redundant(ptr %dst, i32 %a) {
 ; CHECK-NEXT: [[ENTRY:.*:]]
 ; CHECK-NEXT: br label %[[VECTOR_PH:.*]]
 ; CHECK: [[VECTOR_PH]]:
-; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[A]], i64 0
+; CHECK-NEXT: [[TMP0:%.*]] = xor i32 [[A]], -1
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[TMP0]], i64 0
 ; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
-; CHECK-NEXT: [[TMP19:%.*]] = xor <vscale x 4 x i32> [[BROADCAST_SPLAT]], splat (i32 -1)
 ; CHECK-NEXT: [[TMP6:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
 ; CHECK-NEXT: [[TMP7:%.*]] = mul <vscale x 4 x i64> [[TMP6]], splat (i64 9)
 ; CHECK-NEXT: [[INDUCTION:%.*]] = add <vscale x 4 x i64> zeroinitializer, [[TMP7]]
[[INDUCTION:%.*]] = add zeroinitializer, [[TMP7]] @@ -309,7 +309,7 @@ define void @test_phi_in_latch_redundant(ptr %dst, i32 %a) { ; CHECK-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement poison, i64 [[TMP9]], i64 0 ; CHECK-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector [[BROADCAST_SPLATINSERT1]], poison, zeroinitializer ; CHECK-NEXT: [[TMP16:%.*]] = getelementptr i32, ptr [[DST]], [[VEC_IND]] -; CHECK-NEXT: call void @llvm.vp.scatter.nxv4i32.nxv4p0( [[TMP19]], align 4 [[TMP16]], splat (i1 true), i32 [[TMP8]]) +; CHECK-NEXT: call void @llvm.vp.scatter.nxv4i32.nxv4p0( [[BROADCAST_SPLAT]], align 4 [[TMP16]], splat (i1 true), i32 [[TMP8]]) ; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP5]] ; CHECK-NEXT: [[VEC_IND_NEXT]] = add [[VEC_IND]], [[BROADCAST_SPLAT2]] ; CHECK-NEXT: [[TMP18:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 diff --git a/llvm/test/Transforms/LoopVectorize/X86/CostModel/masked-interleaved-store-i16.ll b/llvm/test/Transforms/LoopVectorize/X86/CostModel/masked-interleaved-store-i16.ll index a286df9bc2fc7..c2c04ce6f5ff5 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/CostModel/masked-interleaved-store-i16.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/CostModel/masked-interleaved-store-i16.ll @@ -85,13 +85,13 @@ define void @test2(ptr noalias nocapture %points, i32 %numPoints, ptr noalias no ; DISABLED_MASKED_STRIDED: LV: Found an estimated cost of 1 for VF 1 For instruction: store i16 %2, ptr %arrayidx7, align 2 ; DISABLED_MASKED_STRIDED: LV: Found an estimated cost of 1 for VF 1 For instruction: store i16 %0, ptr %arrayidx2, align 2 ; DISABLED_MASKED_STRIDED: LV: Found an estimated cost of 1 for VF 1 For instruction: store i16 %2, ptr %arrayidx7, align 2 -; DISABLED_MASKED_STRIDED: LV: Found an estimated cost of 5 for VF 2 For instruction: store i16 %0, ptr %arrayidx2, align 2 +; DISABLED_MASKED_STRIDED: LV: Found an estimated cost of 8 for VF 2 For instruction: store i16 %0, ptr %arrayidx2, align 2 ; DISABLED_MASKED_STRIDED: LV: Found an estimated cost of 3000000 for VF 2 For instruction: store i16 %2, ptr %arrayidx7, align 2 -; DISABLED_MASKED_STRIDED: LV: Found an estimated cost of 10 for VF 4 For instruction: store i16 %0, ptr %arrayidx2, align 2 +; DISABLED_MASKED_STRIDED: LV: Found an estimated cost of 17 for VF 4 For instruction: store i16 %0, ptr %arrayidx2, align 2 ; DISABLED_MASKED_STRIDED: LV: Found an estimated cost of 3000000 for VF 4 For instruction: store i16 %2, ptr %arrayidx7, align 2 -; DISABLED_MASKED_STRIDED: LV: Found an estimated cost of 21 for VF 8 For instruction: store i16 %0, ptr %arrayidx2, align 2 +; DISABLED_MASKED_STRIDED: LV: Found an estimated cost of 35 for VF 8 For instruction: store i16 %0, ptr %arrayidx2, align 2 ; DISABLED_MASKED_STRIDED: LV: Found an estimated cost of 3000000 for VF 8 For instruction: store i16 %2, ptr %arrayidx7, align 2 -; DISABLED_MASKED_STRIDED: LV: Found an estimated cost of 43 for VF 16 For instruction: store i16 %0, ptr %arrayidx2, align 2 +; DISABLED_MASKED_STRIDED: LV: Found an estimated cost of 71 for VF 16 For instruction: store i16 %0, ptr %arrayidx2, align 2 ; DISABLED_MASKED_STRIDED: LV: Found an estimated cost of 3000000 for VF 16 For instruction: store i16 %2, ptr %arrayidx7, align 2 ; ; ENABLED_MASKED_STRIDED-LABEL: 'test2' @@ -99,8 +99,8 @@ define void @test2(ptr noalias nocapture %points, i32 %numPoints, ptr noalias no ; ENABLED_MASKED_STRIDED: LV: Found an estimated cost of 1 for VF 1 For instruction: store i16 %2, ptr %arrayidx7, align 2 ; ENABLED_MASKED_STRIDED: LV: Found an estimated cost of 1 
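Equivalence check for the test_phi_in_latch_redundant expectations above: xor is lane-wise, so not(splat(%a)) is the same vector as splat(not(%a)). Hoisting the xor onto the scalar %a before the broadcast therefore leaves the scattered values unchanged, which is why [[BROADCAST_SPLAT]] can feed llvm.vp.scatter directly and the per-iteration vector xor disappears.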
diff --git a/llvm/test/Transforms/LoopVectorize/X86/CostModel/masked-interleaved-store-i16.ll b/llvm/test/Transforms/LoopVectorize/X86/CostModel/masked-interleaved-store-i16.ll
index a286df9bc2fc7..c2c04ce6f5ff5 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/CostModel/masked-interleaved-store-i16.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/CostModel/masked-interleaved-store-i16.ll
@@ -85,13 +85,13 @@ define void @test2(ptr noalias nocapture %points, i32 %numPoints, ptr noalias no
; DISABLED_MASKED_STRIDED: LV: Found an estimated cost of 1 for VF 1 For instruction: store i16 %2, ptr %arrayidx7, align 2
; DISABLED_MASKED_STRIDED: LV: Found an estimated cost of 1 for VF 1 For instruction: store i16 %0, ptr %arrayidx2, align 2
; DISABLED_MASKED_STRIDED: LV: Found an estimated cost of 1 for VF 1 For instruction: store i16 %2, ptr %arrayidx7, align 2
-; DISABLED_MASKED_STRIDED: LV: Found an estimated cost of 5 for VF 2 For instruction: store i16 %0, ptr %arrayidx2, align 2
+; DISABLED_MASKED_STRIDED: LV: Found an estimated cost of 8 for VF 2 For instruction: store i16 %0, ptr %arrayidx2, align 2
; DISABLED_MASKED_STRIDED: LV: Found an estimated cost of 3000000 for VF 2 For instruction: store i16 %2, ptr %arrayidx7, align 2
-; DISABLED_MASKED_STRIDED: LV: Found an estimated cost of 10 for VF 4 For instruction: store i16 %0, ptr %arrayidx2, align 2
+; DISABLED_MASKED_STRIDED: LV: Found an estimated cost of 17 for VF 4 For instruction: store i16 %0, ptr %arrayidx2, align 2
; DISABLED_MASKED_STRIDED: LV: Found an estimated cost of 3000000 for VF 4 For instruction: store i16 %2, ptr %arrayidx7, align 2
-; DISABLED_MASKED_STRIDED: LV: Found an estimated cost of 21 for VF 8 For instruction: store i16 %0, ptr %arrayidx2, align 2
+; DISABLED_MASKED_STRIDED: LV: Found an estimated cost of 35 for VF 8 For instruction: store i16 %0, ptr %arrayidx2, align 2
; DISABLED_MASKED_STRIDED: LV: Found an estimated cost of 3000000 for VF 8 For instruction: store i16 %2, ptr %arrayidx7, align 2
-; DISABLED_MASKED_STRIDED: LV: Found an estimated cost of 43 for VF 16 For instruction: store i16 %0, ptr %arrayidx2, align 2
+; DISABLED_MASKED_STRIDED: LV: Found an estimated cost of 71 for VF 16 For instruction: store i16 %0, ptr %arrayidx2, align 2
; DISABLED_MASKED_STRIDED: LV: Found an estimated cost of 3000000 for VF 16 For instruction: store i16 %2, ptr %arrayidx7, align 2
;
; ENABLED_MASKED_STRIDED-LABEL: 'test2'
@@ -99,8 +99,8 @@ define void @test2(ptr noalias nocapture %points, i32 %numPoints, ptr noalias no
; ENABLED_MASKED_STRIDED: LV: Found an estimated cost of 1 for VF 1 For instruction: store i16 %2, ptr %arrayidx7, align 2
; ENABLED_MASKED_STRIDED: LV: Found an estimated cost of 1 for VF 1 For instruction: store i16 %0, ptr %arrayidx2, align 2
; ENABLED_MASKED_STRIDED: LV: Found an estimated cost of 1 for VF 1 For instruction: store i16 %2, ptr %arrayidx7, align 2
-; ENABLED_MASKED_STRIDED: LV: Found an estimated cost of 5 for VF 2 For instruction: store i16 %0, ptr %arrayidx2, align 2
-; ENABLED_MASKED_STRIDED: LV: Found an estimated cost of 5 for VF 2 For instruction: store i16 %2, ptr %arrayidx7, align 2
+; ENABLED_MASKED_STRIDED: LV: Found an estimated cost of 0 for VF 2 For instruction: store i16 %0, ptr %arrayidx2, align 2
+; ENABLED_MASKED_STRIDED: LV: Found an estimated cost of 13 for VF 2 For instruction: store i16 %2, ptr %arrayidx7, align 2
; ENABLED_MASKED_STRIDED: LV: Found an estimated cost of 0 for VF 4 For instruction: store i16 %0, ptr %arrayidx2, align 2
; ENABLED_MASKED_STRIDED: LV: Found an estimated cost of 14 for VF 4 For instruction: store i16 %2, ptr %arrayidx7, align 2
; ENABLED_MASKED_STRIDED: LV: Found an estimated cost of 0 for VF 8 For instruction: store i16 %0, ptr %arrayidx2, align 2
diff --git a/llvm/test/Transforms/LoopVectorize/X86/fixed-order-recurrence.ll b/llvm/test/Transforms/LoopVectorize/X86/fixed-order-recurrence.ll
index cc84fabd00ecc..002d811d46992 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/fixed-order-recurrence.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/fixed-order-recurrence.ll
@@ -435,67 +435,16 @@ define void @test_first_order_recurrence_tried_to_scalarized(ptr %dst, i1 %c, i3
; CHECK-LABEL: @test_first_order_recurrence_tried_to_scalarized(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[N:%.*]] = select i1 [[C:%.*]], i32 8, i32 9
-; CHECK-NEXT: br label [[VECTOR_PH:%.*]]
-; CHECK: vector.ph:
-; CHECK-NEXT: [[N_RND_UP:%.*]] = add i32 [[N]], 3
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i32 [[N_RND_UP]], 4
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i32 [[N_RND_UP]], [[N_MOD_VF]]
-; CHECK-NEXT: [[TRIP_COUNT_MINUS_1:%.*]] = sub i32 [[N]], 1
-; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[TRIP_COUNT_MINUS_1]], i64 0
-; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
-; CHECK: vector.body:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[PRED_STORE_CONTINUE6:%.*]] ]
-; CHECK-NEXT: [[VEC_IND:%.*]] = phi <4 x i32> [ <i32 0, i32 1, i32 2, i32 3>, [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[PRED_STORE_CONTINUE6]] ]
-; CHECK-NEXT: [[VECTOR_RECUR:%.*]] = phi <4 x i32> [ <i32 poison, i32 poison, i32 poison, i32 4>, [[VECTOR_PH]] ], [ [[VEC_IND]], [[PRED_STORE_CONTINUE6]] ]
-; CHECK-NEXT: [[TMP0:%.*]] = shufflevector <4 x i32> [[VECTOR_RECUR]], <4 x i32> [[VEC_IND]], <4 x i32> <i32 3, i32 4, i32 5, i32 6>
-; CHECK-NEXT: [[TMP1:%.*]] = icmp ule <4 x i32> [[VEC_IND]], [[BROADCAST_SPLAT]]
-; CHECK-NEXT: [[TMP2:%.*]] = extractelement <4 x i1> [[TMP1]], i32 0
-; CHECK-NEXT: br i1 [[TMP2]], label [[PRED_STORE_IF:%.*]], label [[PRED_STORE_CONTINUE:%.*]]
-; CHECK: pred.store.if:
-; CHECK-NEXT: [[TMP3:%.*]] = add i32 [[INDEX]], 0
-; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw i32, ptr [[DST:%.*]], i32 [[TMP3]]
-; CHECK-NEXT: [[TMP5:%.*]] = extractelement <4 x i32> [[TMP0]], i32 0
-; CHECK-NEXT: [[TMP6:%.*]] = sub nsw i32 10, [[TMP5]]
-; CHECK-NEXT: store i32 [[TMP6]], ptr [[TMP4]], align 4
-; CHECK-NEXT: br label [[PRED_STORE_CONTINUE]]
-; CHECK: pred.store.continue:
-; CHECK-NEXT: [[TMP7:%.*]] = extractelement <4 x i1> [[TMP1]], i32 1
-; CHECK-NEXT: br i1 [[TMP7]], label [[PRED_STORE_IF1:%.*]], label [[PRED_STORE_CONTINUE2:%.*]]
-; CHECK: pred.store.if1:
-; CHECK-NEXT: [[TMP8:%.*]] = add i32 [[INDEX]], 1
-; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw i32, ptr [[DST]], i32 [[TMP8]]
-; CHECK-NEXT: [[TMP10:%.*]] = extractelement <4 x i32> [[TMP0]], i32 1
-; CHECK-NEXT: [[TMP11:%.*]] = sub nsw i32 10, [[TMP10]]
-; CHECK-NEXT: store i32 [[TMP11]], ptr [[TMP9]], align 4
-; CHECK-NEXT: br label [[PRED_STORE_CONTINUE2]]
-; CHECK: pred.store.continue2:
-; CHECK-NEXT: [[TMP12:%.*]] = extractelement <4 x i1> [[TMP1]], i32 2
-; CHECK-NEXT: br i1 [[TMP12]], label [[PRED_STORE_IF3:%.*]], label [[PRED_STORE_CONTINUE4:%.*]]
-; CHECK: pred.store.if3:
-; CHECK-NEXT: [[TMP13:%.*]] = add i32 [[INDEX]], 2
-; CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds nuw i32, ptr [[DST]], i32 [[TMP13]]
-; CHECK-NEXT: [[TMP15:%.*]] = extractelement <4 x i32> [[TMP0]], i32 2
+; CHECK: loop:
+; CHECK-NEXT: [[TMP18:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[IV_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP15:%.*]] = phi i32 [ 4, [[ENTRY]] ], [ [[TMP18]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i32 [[TMP18]], 1
; CHECK-NEXT: [[TMP16:%.*]] = sub nsw i32 10, [[TMP15]]
-; CHECK-NEXT: store i32 [[TMP16]], ptr [[TMP14]], align 4
-; CHECK-NEXT: br label [[PRED_STORE_CONTINUE4]]
-; CHECK: pred.store.continue4:
-; CHECK-NEXT: [[TMP17:%.*]] = extractelement <4 x i1> [[TMP1]], i32 3
-; CHECK-NEXT: br i1 [[TMP17]], label [[PRED_STORE_IF5:%.*]], label [[PRED_STORE_CONTINUE6]]
-; CHECK: pred.store.if5:
-; CHECK-NEXT: [[TMP18:%.*]] = add i32 [[INDEX]], 3
-; CHECK-NEXT: [[TMP19:%.*]] = getelementptr inbounds nuw i32, ptr [[DST]], i32 [[TMP18]]
-; CHECK-NEXT: [[TMP20:%.*]] = extractelement <4 x i32> [[TMP0]], i32 3
-; CHECK-NEXT: [[TMP21:%.*]] = sub nsw i32 10, [[TMP20]]
-; CHECK-NEXT: store i32 [[TMP21]], ptr [[TMP19]], align 4
-; CHECK-NEXT: br label [[PRED_STORE_CONTINUE6]]
-; CHECK: pred.store.continue6:
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4
-; CHECK-NEXT: [[VEC_IND_NEXT]] = add <4 x i32> [[VEC_IND]], splat (i32 4)
-; CHECK-NEXT: [[TMP22:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP22]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
-; CHECK: middle.block:
-; CHECK-NEXT: br label [[LOOP:%.*]]
+; CHECK-NEXT: [[TMP19:%.*]] = getelementptr inbounds nuw i32, ptr [[DST:%.*]], i32 [[TMP18]]
+; CHECK-NEXT: store i32 [[TMP16]], ptr [[TMP19]], align 4
+; CHECK-NEXT: [[EC:%.*]] = icmp eq i32 [[IV_NEXT]], [[N]]
+; CHECK-NEXT: br i1 [[EC]], label [[EXIT:%.*]], label [[VECTOR_BODY]]
; CHECK: exit:
; CHECK-NEXT: ret void
;
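Reading the two expected forms of test_first_order_recurrence_tried_to_scalarized side by side: the deleted vector plan realizes the first-order recurrence as a splice shufflevector with mask <i32 3, i32 4, i32 5, i32 6> (lane i takes the induction value of lane i-1, with lane 0 taking the last element carried over from the previous iteration), and then scalarizes every lane into its own predicated-store block. The new expectation is the plain scalar equivalent of that dataflow: two phis, where the second (seeded with 4) carries the previous iteration's induction value into the sub nsw i32 10 computation.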
diff --git a/llvm/test/Transforms/LoopVectorize/debugloc.ll b/llvm/test/Transforms/LoopVectorize/debugloc.ll
index 40cd6b63ca8f6..03e0853d29075 100644
--- a/llvm/test/Transforms/LoopVectorize/debugloc.ll
+++ b/llvm/test/Transforms/LoopVectorize/debugloc.ll
@@ -253,10 +253,10 @@ declare void @llvm.dbg.value(metadata, metadata, metadata)
!32 = distinct !DILexicalBlock(scope: !31, file: !5, line: 137, column: 2)
!33 = !DILocation(line: 210, column: 44, scope: !32)
!34 = !DILocation(line: 320, column: 44, scope: !32)
-!35 = distinct !DISubprogram(name: "test_misc", line: 3, isLocal: false, isDefinition: true, virtualIndex: 6, flags: DIFlagPrototyped, isOptimized: true, unit: !0, scopeLine: 3, file: !5, scope: !6, type: !7, retainedNodes: !12)
+!35 = distinct !DISubprogram(name: "test_misc", line: 3, isLocal: false, isDefinition: true, virtualIndex: 6, flags: DIFlagPrototyped, isOptimized: true, unit: !0, scopeLine: 3, file: !5, scope: !6, type: !7, retainedNodes: !2)
!36 = distinct !DILexicalBlock(scope: !35, file: !5, line: 137, column: 2)
!37 = !DILocation(line: 430, column: 44, scope: !36)
!38 = !DILocation(line: 540, column: 44, scope: !36)
-!39 = distinct !DISubprogram(name: "test_scalar_Steps", line: 3, isLocal: false, isDefinition: true, virtualIndex: 6, flags: DIFlagPrototyped, isOptimized: true, unit: !0, scopeLine: 3, file: !5, scope: !6, type: !7, retainedNodes: !12)
+!39 = distinct !DISubprogram(name: "test_scalar_Steps", line: 3, isLocal: false, isDefinition: true, virtualIndex: 6, flags: DIFlagPrototyped, isOptimized: true, unit: !0, scopeLine: 3, file: !5, scope: !6, type: !7, retainedNodes: !2)
!40 = distinct !DILexicalBlock(scope: !39, file: !5, line: 137, column: 2)
!41 = !DILocation(line: 650, column: 44, scope: !40)
diff --git a/llvm/test/Transforms/LoopVectorize/first-order-recurrence-sink-replicate-region.ll b/llvm/test/Transforms/LoopVectorize/first-order-recurrence-sink-replicate-region.ll
index fe230fa6c9090..b72cbd333cb79 100644
--- a/llvm/test/Transforms/LoopVectorize/first-order-recurrence-sink-replicate-region.ll
+++ b/llvm/test/Transforms/LoopVectorize/first-order-recurrence-sink-replicate-region.ll
@@ -49,6 +49,8 @@ define void @sink_replicate_region_1(i32 %x, ptr %ptr, ptr noalias %dst) optsize
; CHECK-NEXT: loop.0:
; CHECK-NEXT: WIDEN-CAST ir<%conv> = sext vp<[[PRED1]]> to i32
; CHECK-NEXT: EMIT vp<[[SPLICE:%.+]]> = first-order splice ir<%0>, ir<%conv>
+; CHECK-NEXT: WIDEN ir<%rem> = srem vp<[[SPLICE]]>, ir<%x>
+; CHECK-NEXT: WIDEN ir<%add> = add ir<%conv>, ir<%rem>
; CHECK-NEXT: Successor(s): pred.store
; CHECK-EMPTY:
; CHECK-NEXT: pred.store: {
@@ -57,9 +59,7 @@ define void @sink_replicate_region_1(i32 %x, ptr %ptr, ptr noalias %dst) optsize
; CHECK-NEXT: Successor(s): pred.store.if, pred.store.continue
; CHECK-EMPTY:
; CHECK-NEXT: pred.store.if:
-; CHECK-NEXT: REPLICATE ir<%rem> = srem vp<[[SPLICE]]>, ir<%x>
; CHECK-NEXT: REPLICATE ir<%gep.dst> = getelementptr ir<%dst>, vp<[[STEPS]]>
-; CHECK-NEXT: REPLICATE ir<%add> = add ir<%conv>, ir<%rem>
; CHECK-NEXT: REPLICATE store ir<%add>, ir<%gep.dst>
; CHECK-NEXT: Successor(s): pred.store.continue
; CHECK-EMPTY:
@@ -293,27 +293,44 @@ define void @sink_replicate_region_4_requires_split_at_end_of_block(i32 %x, ptr
; CHECK-NEXT: loop.0:
; CHECK-NEXT: WIDEN-CAST ir<%conv> = sext vp<[[PRED]]> to i32
; CHECK-NEXT: EMIT vp<[[SPLICE:%.+]]> = first-order splice ir<%0>, ir<%conv>
-; CHECK-NEXT: Successor(s): pred.store
+; CHECK-NEXT: WIDEN ir<%rem> = srem vp<[[SPLICE]]>, ir<%x>
+; CHECK-NEXT: Successor(s): pred.load
; CHECK-EMPTY:
-; CHECK: pred.store: {
-; CHECK-NEXT: pred.store.entry:
+; CHECK: pred.load: {
+; CHECK-NEXT: pred.load.entry:
; CHECK-NEXT: BRANCH-ON-MASK vp<[[MASK]]>
-; CHECK-NEXT: Successor(s): pred.store.if, pred.store.continue
+; CHECK-NEXT: Successor(s): pred.load.if, pred.load.continue
; CHECK-EMPTY:
-; CHECK: pred.store.if:
-; CHECK-NEXT: REPLICATE ir<%lv.2> = load ir<%gep>
-; CHECK-NEXT: REPLICATE ir<%rem> = srem vp<[[SPLICE]]>, ir<%x>
-; CHECK-NEXT: REPLICATE ir<%conv.lv.2> = sext ir<%lv.2>
-; CHECK-NEXT: REPLICATE ir<%add.1> = add ir<%conv>, ir<%rem>
-; CHECK-NEXT: REPLICATE ir<%gep.dst> = getelementptr ir<%dst>, vp<[[STEPS]]>
-; CHECK-NEXT: REPLICATE ir<%add> = add ir<%add.1>, ir<%conv.lv.2>
-; CHECK-NEXT: REPLICATE store ir<%add>, ir<%gep.dst>
-; CHECK-NEXT: Successor(s): pred.store.continue
+; CHECK: pred.load.if:
+; CHECK-NEXT: REPLICATE ir<%lv.2> = load ir<%gep> (S->V)
+; CHECK-NEXT: Successor(s): pred.load.continue
; CHECK-EMPTY:
-; CHECK: pred.store.continue:
+; CHECK: pred.load.continue:
+; CHECK-NEXT: PHI-PREDICATED-INSTRUCTION vp<%9> = ir<%lv.2>
; CHECK-NEXT: No successors
; CHECK-NEXT: }
-; CHECK-NEXT: Successor(s): loop.2
+; CHECK-NEXT: Successor(s): loop.1
+; CHECK-EMPTY:
+; CHECK-NEXT: loop.1:
+; CHECK-NEXT: WIDEN ir<%add.1> = add ir<%conv>, ir<%rem>
+; CHECK-NEXT: WIDEN-CAST ir<%conv.lv.2> = sext vp<%9> to i32
+; CHECK-NEXT: WIDEN ir<%add> = add ir<%add.1>, ir<%conv.lv.2>
+; CHECK-NEXT: Successor(s): pred.store
+; CHECK-EMPTY:
+; CHECK-NEXT: pred.store: {
+; CHECK-NEXT: pred.store.entry:
+; CHECK-NEXT: BRANCH-ON-MASK vp<[[MASK]]>
+; CHECK-NEXT: Successor(s): pred.store.if, pred.store.continue
+; CHECK-EMPTY:
+; CHECK-NEXT: pred.store.if:
+; CHECK-NEXT: REPLICATE ir<%gep.dst> = getelementptr ir<%dst>, vp<[[STEPS]]>
+; CHECK-NEXT: REPLICATE store ir<%add>, ir<%gep.dst>
+; CHECK-NEXT: Successor(s): pred.store.continue
+; CHECK-EMPTY:
+; CHECK-NEXT: pred.store.continue:
+; CHECK-NEXT: No successors
+; CHECK-NEXT: }
+; CHECK-NEXT: Successor(s): loop.2
; CHECK-EMPTY:
; CHECK: loop.2:
; CHECK-NEXT: EMIT vp<[[CAN_IV_NEXT:%.+]]> = add nuw vp<[[CAN_IV]]>, vp<[[VFxUF]]>
@@ -377,6 +394,7 @@ define void @sink_replicate_region_after_replicate_region(ptr %ptr, ptr noalias
; CHECK-NEXT: ir<%iv> = WIDEN-INDUCTION ir<0>, ir<1>, vp<[[VF]]>
; CHECK-NEXT: EMIT vp<[[MASK:%.+]]> = icmp ule ir<%iv>, vp<[[BTC]]>
; CHECK-NEXT: EMIT vp<[[SPLICE:%.+]]> = first-order splice ir<%recur>, ir<%recur.next>
+; CHECK-NEXT: WIDEN ir<%rem> = srem vp<[[SPLICE]]>, ir<%x>
; CHECK-NEXT: Successor(s): pred.store
; CHECK-EMPTY:
; CHECK-NEXT: pred.store: {
@@ -386,7 +404,6 @@ define void @sink_replicate_region_after_replicate_region(ptr %ptr, ptr noalias
; CHECK-EMPTY:
; CHECK-NEXT: pred.store.if:
; CHECK-NEXT: vp<[[STEPS:%.+]]> = SCALAR-STEPS vp<[[CAN_IV]]>, ir<1>
-; CHECK-NEXT: REPLICATE ir<%rem> = srem vp<[[SPLICE]]>, ir<%x>
; CHECK-NEXT: REPLICATE ir<%rem.div> = sdiv ir<20>, ir<%rem>
; CHECK-NEXT: REPLICATE ir<%gep> = getelementptr ir<%ptr>, vp<[[STEPS]]>
; CHECK-NEXT: REPLICATE store ir<%rem.div>, ir<%gep>
@@ -457,6 +474,7 @@ define void @need_new_block_after_sinking_pr56146(i32 %x, ptr %src, ptr noalias
; CHECK-NEXT: EMIT vp<[[CMP:%.+]]> = icmp ule vp<[[WIDE_IV]]>, vp<[[BTC]]>
; CHECK-NEXT: CLONE ir<[[L]]> = load ir<%src>
; CHECK-NEXT: EMIT vp<[[SPLICE:%.+]]> = first-order splice ir<%.pn>, ir<[[L]]>
+; CHECK-NEXT: WIDEN ir<%val> = sdiv vp<[[SPLICE]]>, ir<%x>
; CHECK-NEXT: Successor(s): pred.store
; CHECK-EMPTY:
; CHECK-NEXT: pred.store: {
@@ -467,7 +485,6 @@ define void @need_new_block_after_sinking_pr56146(i32 %x, ptr %src, ptr noalias
; CHECK-NEXT: pred.store.if:
; CHECK-NEXT: vp<[[SCALAR_STEPS:%.+]]> = SCALAR-STEPS vp<[[DERIVED_IV]]>, ir<1>, vp<[[VF]]>
; CHECK-NEXT: REPLICATE ir<%gep.dst> = getelementptr ir<%dst>, vp<[[SCALAR_STEPS]]>
-; CHECK-NEXT: REPLICATE ir<%val> = sdiv vp<[[SPLICE]]>, ir<%x>
; CHECK-NEXT: REPLICATE store ir<%val>, ir<%gep.dst>
; CHECK-NEXT: Successor(s): pred.store.continue
; CHECK-EMPTY:
diff --git a/llvm/test/Transforms/LoopVectorize/first-order-recurrence-with-uniform-ops.ll b/llvm/test/Transforms/LoopVectorize/first-order-recurrence-with-uniform-ops.ll
index 8a579734a06e1..372876c5faac6 100644
--- a/llvm/test/Transforms/LoopVectorize/first-order-recurrence-with-uniform-ops.ll
+++ b/llvm/test/Transforms/LoopVectorize/first-order-recurrence-with-uniform-ops.ll
@@ -134,22 +134,18 @@ define i16 @for_phi_removed(ptr %src) {
; UNROLL-NO-IC: [[VECTOR_BODY]]:
; UNROLL-NO-IC-NEXT: [[INDEX:%.*]] = phi i32 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; UNROLL-NO-IC-NEXT: [[TMP0:%.*]] = load i32, ptr [[SRC]], align 4
-; UNROLL-NO-IC-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[TMP0]], i64 0
-; UNROLL-NO-IC-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer
-; UNROLL-NO-IC-NEXT: [[TMP1:%.*]] = icmp eq <4 x i32> [[BROADCAST_SPLAT]], zeroinitializer
-; UNROLL-NO-IC-NEXT: [[TMP4:%.*]] = extractelement <4 x i1> [[TMP1]], i32 0
-; UNROLL-NO-IC-NEXT: [[TMP2:%.*]] = select i1 [[TMP4]], <4 x i16> splat (i16 1), <4 x i16> zeroinitializer
+; UNROLL-NO-IC-NEXT: [[TMP1:%.*]] = icmp eq i32 [[TMP0]], 0
+; UNROLL-NO-IC-NEXT: [[TMP2:%.*]] = select i1 [[TMP1]], i16 1, i16 0
; UNROLL-NO-IC-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 8
; UNROLL-NO-IC-NEXT: [[TMP3:%.*]] = icmp eq i32 [[INDEX_NEXT]], 104
; UNROLL-NO-IC-NEXT: br i1 [[TMP3]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; UNROLL-NO-IC: [[MIDDLE_BLOCK]]:
-; UNROLL-NO-IC-NEXT: [[VECTOR_RECUR_EXTRACT:%.*]] = extractelement <4 x i16> [[TMP2]], i32 3
; UNROLL-NO-IC-NEXT: br label %[[SCALAR_PH:.*]]
; UNROLL-NO-IC: [[SCALAR_PH]]:
; UNROLL-NO-IC-NEXT: br label %[[LOOP:.*]]
; UNROLL-NO-IC: [[LOOP]]:
; UNROLL-NO-IC-NEXT: [[IV:%.*]] = phi i16 [ 104, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
-; UNROLL-NO-IC-NEXT: [[P:%.*]] = phi i16 [ [[VECTOR_RECUR_EXTRACT]], %[[SCALAR_PH]] ], [ [[SEL:%.*]], %[[LOOP]] ]
+; UNROLL-NO-IC-NEXT: [[P:%.*]] = phi i16 [ [[TMP2]], %[[SCALAR_PH]] ], [ [[SEL:%.*]], %[[LOOP]] ]
; UNROLL-NO-IC-NEXT: [[L:%.*]] = load i32, ptr [[SRC]], align 4
; UNROLL-NO-IC-NEXT: [[C:%.*]] = icmp eq i32 [[L]], 0
; UNROLL-NO-IC-NEXT: [[SEL]] = select i1 [[C]], i16 1, i16 0
@@ -200,22 +196,18 @@ define i16 @for_phi_removed(ptr %src) {
; SINK-AFTER: [[VECTOR_BODY]]:
; SINK-AFTER-NEXT: [[INDEX:%.*]] = phi i32 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; SINK-AFTER-NEXT: [[TMP0:%.*]] = load i32, ptr [[SRC]], align 4
-; SINK-AFTER-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[TMP0]], i64 0
-; SINK-AFTER-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer
-; SINK-AFTER-NEXT: [[TMP1:%.*]] = icmp eq <4 x i32> [[BROADCAST_SPLAT]], zeroinitializer
-; SINK-AFTER-NEXT: [[TMP4:%.*]] = extractelement <4 x i1> [[TMP1]], i32 0
-; SINK-AFTER-NEXT: [[TMP2:%.*]] = select i1 [[TMP4]], <4 x i16> splat (i16 1), <4 x i16> zeroinitializer
+; SINK-AFTER-NEXT: [[TMP1:%.*]] = icmp eq i32 [[TMP0]], 0
+; SINK-AFTER-NEXT: [[TMP2:%.*]] = select i1 [[TMP1]], i16 1, i16 0
; SINK-AFTER-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4
; SINK-AFTER-NEXT: [[TMP3:%.*]] = icmp eq i32 [[INDEX_NEXT]], 108
; SINK-AFTER-NEXT: br i1 [[TMP3]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; SINK-AFTER: [[MIDDLE_BLOCK]]:
-; SINK-AFTER-NEXT: [[VECTOR_RECUR_EXTRACT:%.*]] = extractelement <4 x i16> [[TMP2]], i32 3
; SINK-AFTER-NEXT: br label %[[SCALAR_PH:.*]]
; SINK-AFTER: [[SCALAR_PH]]:
; SINK-AFTER-NEXT: br label %[[LOOP:.*]]
; SINK-AFTER: [[LOOP]]:
; SINK-AFTER-NEXT: [[IV:%.*]] = phi i16 [ 108, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
-; SINK-AFTER-NEXT: [[P:%.*]] = phi i16 [ [[VECTOR_RECUR_EXTRACT]], %[[SCALAR_PH]] ], [ [[SEL:%.*]], %[[LOOP]] ]
+; SINK-AFTER-NEXT: [[P:%.*]] = phi i16 [ [[TMP2]], %[[SCALAR_PH]] ], [ [[SEL:%.*]], %[[LOOP]] ]
; SINK-AFTER-NEXT: [[L:%.*]] = load i32, ptr [[SRC]], align 4
; SINK-AFTER-NEXT: [[C:%.*]] = icmp eq i32 [[L]], 0
; SINK-AFTER-NEXT: [[SEL]] = select i1 [[C]], i16 1, i16 0
diff --git a/llvm/test/Transforms/LoopVectorize/narrow-to-single-scalar.ll b/llvm/test/Transforms/LoopVectorize/narrow-to-single-scalar.ll
index 7b0c366e16c7b..440309d246899 100644
--- a/llvm/test/Transforms/LoopVectorize/narrow-to-single-scalar.ll
+++ b/llvm/test/Transforms/LoopVectorize/narrow-to-single-scalar.ll
@@ -153,3 +153,79 @@ loop:
exit:
  ret void
}
+
+define void @narrow_widen_store_user(i32 %x, ptr noalias %A, ptr noalias %B) {
+; VF4IC1-LABEL: define void @narrow_widen_store_user(
+; VF4IC1-SAME: i32 [[X:%.*]], ptr noalias [[A:%.*]], ptr noalias [[B:%.*]]) {
+; VF4IC1-NEXT: [[ENTRY:.*:]]
+; VF4IC1-NEXT: br label %[[VECTOR_PH:.*]]
+; VF4IC1: [[VECTOR_PH]]:
+; VF4IC1-NEXT: [[TMP0:%.*]] = add i32 [[X]], 1
+; VF4IC1-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[TMP0]], i64 0
+; VF4IC1-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer
+; VF4IC1-NEXT: [[TMP5:%.*]] = mul i32 [[TMP0]], 3
+; VF4IC1-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <4 x i32> poison, i32 [[TMP5]], i64 0
+; VF4IC1-NEXT: [[TMP1:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT1]], <4 x i32> poison, <4 x i32> zeroinitializer
+; VF4IC1-NEXT: br label %[[VECTOR_BODY:.*]]
+; VF4IC1: [[VECTOR_BODY]]:
+; VF4IC1-NEXT: [[INDEX:%.*]] = phi i32 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; VF4IC1-NEXT: [[TMP2:%.*]] = getelementptr i32, ptr [[A]], i32 [[INDEX]]
+; VF4IC1-NEXT: [[TMP3:%.*]] = getelementptr i32, ptr [[B]], i32 [[INDEX]]
+; VF4IC1-NEXT: store <4 x i32> [[BROADCAST_SPLAT]], ptr [[TMP2]], align 4
+; VF4IC1-NEXT: store <4 x i32> [[TMP1]], ptr [[TMP3]], align 4
+; VF4IC1-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4
+; VF4IC1-NEXT: [[TMP4:%.*]] = icmp eq i32 [[INDEX_NEXT]], 1024
+; VF4IC1-NEXT: br i1 [[TMP4]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
+; VF4IC1: [[MIDDLE_BLOCK]]:
+; VF4IC1-NEXT: br label %[[EXIT:.*]]
+; VF4IC1: [[EXIT]]:
+; VF4IC1-NEXT: ret void
+;
+; VF2IC2-LABEL: define void @narrow_widen_store_user(
+; VF2IC2-SAME: i32 [[X:%.*]], ptr noalias [[A:%.*]], ptr noalias [[B:%.*]]) {
+; VF2IC2-NEXT: [[ENTRY:.*:]]
+; VF2IC2-NEXT: br label %[[VECTOR_PH:.*]]
+; VF2IC2: [[VECTOR_PH]]:
+; VF2IC2-NEXT: [[TMP0:%.*]] = add i32 [[X]], 1
+; VF2IC2-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <2 x i32> poison, i32 [[TMP0]], i64 0
+; VF2IC2-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <2 x i32> [[BROADCAST_SPLATINSERT]], <2 x i32> poison, <2 x i32> zeroinitializer
+; VF2IC2-NEXT: [[TMP7:%.*]] = mul i32 [[TMP0]], 3
+; VF2IC2-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <2 x i32> poison, i32 [[TMP7]], i64 0
+; VF2IC2-NEXT: [[TMP1:%.*]] = shufflevector <2 x i32> [[BROADCAST_SPLATINSERT1]], <2 x i32> poison, <2 x i32> zeroinitializer
+; VF2IC2-NEXT: br label %[[VECTOR_BODY:.*]]
+; VF2IC2: [[VECTOR_BODY]]:
+; VF2IC2-NEXT: [[INDEX:%.*]] = phi i32 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; VF2IC2-NEXT: [[TMP2:%.*]] = getelementptr i32, ptr [[A]], i32 [[INDEX]]
+; VF2IC2-NEXT: [[TMP3:%.*]] = getelementptr i32, ptr [[B]], i32 [[INDEX]]
+; VF2IC2-NEXT: [[TMP4:%.*]] = getelementptr i32, ptr [[TMP2]], i32 2
+; VF2IC2-NEXT: store <2 x i32> [[BROADCAST_SPLAT]], ptr [[TMP2]], align 4
+; VF2IC2-NEXT: store <2 x i32> [[BROADCAST_SPLAT]], ptr [[TMP4]], align 4
+; VF2IC2-NEXT: [[TMP5:%.*]] = getelementptr i32, ptr [[TMP3]], i32 2
+; VF2IC2-NEXT: store <2 x i32> [[TMP1]], ptr [[TMP3]], align 4
+; VF2IC2-NEXT: store <2 x i32> [[TMP1]], ptr [[TMP5]], align 4
+; VF2IC2-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4
+; VF2IC2-NEXT: [[TMP6:%.*]] = icmp eq i32 [[INDEX_NEXT]], 1024
+; VF2IC2-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
+; VF2IC2: [[MIDDLE_BLOCK]]:
+; VF2IC2-NEXT: br label %[[EXIT:.*]]
+; VF2IC2: [[EXIT]]:
+; VF2IC2-NEXT: ret void
+;
+entry:
+  br label %loop
+
+loop:
+  %iv = phi i32 [ 0, %entry ], [ %iv.next, %loop ]
+  %gep.A = getelementptr i32, ptr %A, i32 %iv
+  %gep.B = getelementptr i32, ptr %B, i32 %iv
+  %wide.add = add i32 %x, 1
+  %wide.mul = mul i32 %wide.add, 3
+  store i32 %wide.add, ptr %gep.A
+  store i32 %wide.mul, ptr %gep.B
+  %iv.next = add i32 %iv, 1
+  %ec = icmp ne i32 %iv.next, 1024
+  br i1 %ec, label %loop, label %exit
+
+exit:
+  ret void
+}
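In @narrow_widen_store_user, %wide.add = add i32 %x, 1 and %wide.mul = mul i32 %wide.add, 3 do not depend on %iv, so every lane of the stored vectors carries the same value. Both check prefixes therefore expect the add and mul as single scalars in the vector preheader, with only their splats stored inside the loop; under VF2IC2 the same two splats are simply stored to both interleaved parts.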
diff --git a/llvm/test/Transforms/LoopVectorize/pr50686.ll b/llvm/test/Transforms/LoopVectorize/pr50686.ll
index 878fbec452220..be9110ce0093a 100644
--- a/llvm/test/Transforms/LoopVectorize/pr50686.ll
+++ b/llvm/test/Transforms/LoopVectorize/pr50686.ll
@@ -18,20 +18,16 @@ define void @m(ptr nocapture %p, ptr nocapture %p2, i32 %q) {
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[P2]], align 4, !alias.scope [[META0:![0-9]+]]
-; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[TMP1]], i64 0
-; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer
-; CHECK-NEXT: [[TMP2:%.*]] = sub nsw <4 x i32> zeroinitializer, [[BROADCAST_SPLAT]]
-; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[ARRAYIDX9_1]], align 4, !alias.scope [[META0]]
-; CHECK-NEXT: [[BROADCAST_SPLATINSERT2:%.*]] = insertelement <4 x i32> poison, i32 [[TMP3]], i64 0
-; CHECK-NEXT: [[BROADCAST_SPLAT3:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT2]], <4 x i32> poison, <4 x i32> zeroinitializer
-; CHECK-NEXT: [[TMP4:%.*]] = sub nsw <4 x i32> [[TMP2]], [[BROADCAST_SPLAT3]]
-; CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[ARRAYIDX9_2]], align 4, !alias.scope [[META0]]
+; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[P2]], align 4, !alias.scope [[META0:![0-9]+]]
+; CHECK-NEXT: [[TMP1:%.*]] = sub nsw i32 0, [[TMP0]]
+; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[ARRAYIDX9_1]], align 4, !alias.scope [[META0]]
+; CHECK-NEXT: [[TMP3:%.*]] = sub nsw i32 [[TMP1]], [[TMP2]]
+; CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr [[ARRAYIDX9_2]], align 4, !alias.scope [[META0]]
+; CHECK-NEXT: [[TMP5:%.*]] = sub nsw i32 [[TMP3]], [[TMP4]]
; CHECK-NEXT: [[BROADCAST_SPLATINSERT4:%.*]] = insertelement <4 x i32> poison, i32 [[TMP5]], i64 0
; CHECK-NEXT: [[BROADCAST_SPLAT5:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT4]], <4 x i32> poison, <4 x i32> zeroinitializer
-; CHECK-NEXT: [[TMP6:%.*]] = sub nsw <4 x i32> [[TMP4]], [[BROADCAST_SPLAT5]]
; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds i32, ptr [[P]], i64 [[INDEX]]
-; CHECK-NEXT: store <4 x i32> [[TMP6]], ptr [[TMP7]], align 4, !alias.scope [[META3:![0-9]+]], !noalias [[META0]]
+; CHECK-NEXT: store <4 x i32> [[BROADCAST_SPLAT5]], ptr [[TMP7]], align 4, !alias.scope [[META3:![0-9]+]], !noalias [[META0]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], 60
; CHECK-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
diff --git a/llvm/test/Transforms/LoopVectorize/vplan-sink-scalars-and-merge.ll b/llvm/test/Transforms/LoopVectorize/vplan-sink-scalars-and-merge.ll
index 2dd6a04ee7d4a..3161a0d5e6f5e 100644
--- a/llvm/test/Transforms/LoopVectorize/vplan-sink-scalars-and-merge.ll
+++ b/llvm/test/Transforms/LoopVectorize/vplan-sink-scalars-and-merge.ll
@@ -1,6 +1,6 @@
; REQUIRES: asserts

-; RUN: opt -passes=loop-vectorize -force-vector-interleave=1 -force-vector-width=2 -debug -disable-output %s 2>&1 | FileCheck %s
+; RUN: opt -passes=loop-vectorize -force-vector-interleave=1 -force-vector-width=2 -force-widen-divrem-via-safe-divisor=0 -debug -disable-output %s 2>&1 | FileCheck %s

target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
diff --git a/llvm/test/Transforms/LowerMatrixIntrinsics/remarks-inlining.ll b/llvm/test/Transforms/LowerMatrixIntrinsics/remarks-inlining.ll
index aaabd18958fae..618ec86ebd35d 100644
--- a/llvm/test/Transforms/LowerMatrixIntrinsics/remarks-inlining.ll
+++ b/llvm/test/Transforms/LowerMatrixIntrinsics/remarks-inlining.ll
@@ -118,18 +118,18 @@ declare <2 x float> @llvm.matrix.transpose(<2 x float>, i32, i32)
!4 = !{i32 2, !"Debug Info Version", i32 3}
!5 = distinct !DISubprogram(name: "load_fn", scope: !1, file: !1, line: 1, type: !6, isLocal: false, isDefinition: true, scopeLine: 1, flags: DIFlagPrototyped, isOptimized: true, unit: !0, retainedNodes: !12)
!17 = !DIFile(filename: "toplevel.c", directory: "/test")
-!16 = distinct !DISubprogram(name: "toplevel", scope: !1, file: !17, line: 1, type: !6, isLocal: false, isDefinition: true, scopeLine: 1, flags: DIFlagPrototyped, isOptimized: true, unit: !0, retainedNodes: !12)
+!16 = distinct !DISubprogram(name: "toplevel", scope: !1, file: !17, line: 1, type: !6, isLocal: false, isDefinition: true, scopeLine: 1, flags: DIFlagPrototyped, isOptimized: true, unit: !0, retainedNodes: !2)
!18 = !DIFile(filename: "assign.h", directory: "/test")
-!19 = distinct !DISubprogram(name: "assign", scope: !1, file: !18, line: 1, type: !6, isLocal: false, isDefinition: true, scopeLine: 1, flags: DIFlagPrototyped, isOptimized: true, unit: !0, retainedNodes: !12)
+!19 = distinct !DISubprogram(name: "assign", scope: !1, file: !18, line: 1, type: !6, isLocal: false, isDefinition: true, scopeLine: 1, flags: DIFlagPrototyped, isOptimized: true, unit: !0, retainedNodes: !2)
!20 = !DIFile(filename: "add.h", directory: "/test")
-!21 = distinct !DISubprogram(name: "add_fn", scope: !1, file: !20, line: 1, type: !6, isLocal: false, isDefinition: true, scopeLine: 1, flags: DIFlagPrototyped, isOptimized: true, unit: !0, retainedNodes: !12)
+!21 = distinct !DISubprogram(name: "add_fn", scope: !1, file: !20, line: 1, type: !6, isLocal: false, isDefinition: true, scopeLine: 1, flags: DIFlagPrototyped, isOptimized: true, unit: !0, retainedNodes: !2)
!22 = !DIFile(filename: "store.h", directory: "/test")
-!23 = distinct !DISubprogram(name: "store_fn", scope: !1, file: !22, line: 1, type: !6, isLocal: false, isDefinition: true, scopeLine: 1, flags: DIFlagPrototyped, isOptimized: true, unit: !0, retainedNodes: !12)
+!23 = distinct !DISubprogram(name: "store_fn", scope: !1, file: !22, line: 1, type: !6, isLocal: false, isDefinition: true, scopeLine: 1, flags: DIFlagPrototyped, isOptimized: true, unit: !0, retainedNodes: !2)
!24 = !DIFile(filename: "transpose.h", directory: "/test")
-!25 = distinct !DISubprogram(name: "transpose", scope: !1, file: !24, line: 1, type: !6, isLocal: false, isDefinition: true, scopeLine: 1, flags: DIFlagPrototyped, isOptimized: true, unit: !0, retainedNodes: !12)
+!25 = distinct !DISubprogram(name: "transpose", scope: !1, file: !24, line: 1, type: !6, isLocal: false, isDefinition: true, scopeLine: 1, flags: DIFlagPrototyped, isOptimized: true, unit: !0, retainedNodes: !2)
!6 = !DISubroutineType(types: !7)
diff --git a/llvm/test/Transforms/LowerMatrixIntrinsics/remarks.ll b/llvm/test/Transforms/LowerMatrixIntrinsics/remarks.ll
index 628ff08b81679..ff41c57055bff 100644
--- a/llvm/test/Transforms/LowerMatrixIntrinsics/remarks.ll
+++ b/llvm/test/Transforms/LowerMatrixIntrinsics/remarks.ll
@@ -163,26 +163,26 @@ declare void @llvm.matrix.column.major.store(<9 x double>, ptr, i64, i1, i32, i3
!19 = !DILocation(line: 10, column: 20, scope: !5)
!20 = !DILocation(line: 10, column: 10, scope: !5)

-!21 = distinct !DISubprogram(name: "fn2", scope: !1, file: !1, line: 1, type: !6, isLocal: false, isDefinition: true, scopeLine: 1, flags: DIFlagPrototyped, isOptimized: true, unit: !0, retainedNodes: !12)
+!21 = distinct !DISubprogram(name: "fn2", scope: !1, file: !1, line: 1, type: !6, isLocal: false, isDefinition: true, scopeLine: 1, flags: DIFlagPrototyped, isOptimized: true, unit: !0, retainedNodes: !2)
!22 = !DILocation(line: 30, column: 20, scope: !21)
-!23 = distinct !DISubprogram(name: "fn3", scope: !1, file: !1, line: 1, type: !6, isLocal: false, isDefinition: true, scopeLine: 1, flags: DIFlagPrototyped, isOptimized: true, unit: !0, retainedNodes: !12)
+!23 = distinct !DISubprogram(name: "fn3", scope: !1, file: !1, line: 1, type: !6, isLocal: false, isDefinition: true, scopeLine: 1, flags: DIFlagPrototyped, isOptimized: true, unit: !0, retainedNodes: !2)
!24 = !DILocation(line: 40, column: 20, scope: !23)
-!25 = distinct !DISubprogram(name: "fn4", scope: !1, file: !1, line: 1, type: !6, isLocal: false, isDefinition: true, scopeLine: 1, flags: DIFlagPrototyped, isOptimized: true, unit: !0, retainedNodes: !12)
+!25 = distinct !DISubprogram(name: "fn4", scope: !1, file: !1, line: 1, type: !6, isLocal: false, isDefinition: true, scopeLine: 1, flags: DIFlagPrototyped, isOptimized: true, unit: !0, retainedNodes: !2)
!26 = !DILocation(line: 50, column: 20, scope: !25)
-!27 = distinct !DISubprogram(name: "fn5", scope: !1, file: !1, line: 1, type: !6, isLocal: false, isDefinition: true, scopeLine: 1, flags: DIFlagPrototyped, isOptimized: true, unit: !0, retainedNodes: !12)
+!27 = distinct !DISubprogram(name: "fn5", scope: !1, file: !1, line: 1, type: !6, isLocal: false, isDefinition: true, scopeLine: 1, flags: DIFlagPrototyped, isOptimized: true, unit: !0, retainedNodes: !2)
!28 = !DILocation(line: 60, column: 20, scope: !27)
-!29 = distinct !DISubprogram(name: "fn6", scope: !1, file: !1, line: 1, type: !6, isLocal: false, isDefinition: true, scopeLine: 1, flags: DIFlagPrototyped, isOptimized: true, unit: !0, retainedNodes: !12)
+!29 = distinct !DISubprogram(name: "fn6", scope: !1, file: !1, line: 1, type: !6, isLocal: false, isDefinition: true, scopeLine: 1, flags: DIFlagPrototyped, isOptimized: true, unit: !0, retainedNodes: !2)
!30 = !DILocation(line: 70, column: 20, scope: !29)
-!31 = distinct !DISubprogram(name: "fn7", scope: !1, file: !1, line: 1, type: !6, isLocal: false, isDefinition: true, scopeLine: 1, flags: DIFlagPrototyped, isOptimized: true, unit: !0, retainedNodes: !12)
+!31 = distinct !DISubprogram(name: "fn7", scope: !1, file: !1, line: 1, type: !6, isLocal: false, isDefinition: true, scopeLine: 1, flags: DIFlagPrototyped, isOptimized: true, unit: !0, retainedNodes: !2)
!32 = !DILocation(line: 80, column: 20, scope: !31)
-!33 = distinct !DISubprogram(name: "fn8", scope: !1, file: !1, line: 1, type: !6, isLocal: false, isDefinition: true, scopeLine: 1, flags: DIFlagPrototyped, isOptimized: true, unit: !0, retainedNodes: !12)
+!33 = distinct !DISubprogram(name: "fn8", scope: !1, file: !1, line: 1, type: !6, isLocal: false, isDefinition: true, scopeLine: 1, flags: DIFlagPrototyped, isOptimized: true, unit: !0, retainedNodes: !2)
!34 = !DILocation(line: 90, column: 20, scope: !33)
-!35 = distinct !DISubprogram(name: "fn9", scope: !1, file: !1, line: 1, type: !6, isLocal: false, isDefinition: true, scopeLine: 1, flags: DIFlagPrototyped, isOptimized: true, unit: !0, retainedNodes: !12)
+!35 = distinct !DISubprogram(name: "fn9", scope: !1, file: !1, line: 1, type: !6, isLocal: false, isDefinition: true, scopeLine: 1, flags: DIFlagPrototyped, isOptimized: true, unit: !0, retainedNodes: !2)
!36 = !DILocation(line: 100, column: 20, scope: !35)
diff --git a/llvm/test/Transforms/SROA/heterogeneous-poison.ll b/llvm/test/Transforms/SROA/heterogeneous-poison.ll
index b4d1e80833c11..63264d2aad969 100644
--- a/llvm/test/Transforms/SROA/heterogeneous-poison.ll
+++ b/llvm/test/Transforms/SROA/heterogeneous-poison.ll
@@ -7,9 +7,7 @@ target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:
%struct.pair = type { i32, i32 }

define i32 @t1() !dbg !9 {
-; CHECK-LABEL: define i32 @t1(
-; CHECK-SAME: ) !dbg [[DBG9:![0-9]+]] {
-; CHECK-NEXT: #dbg_value(i32 2, [[META13:![0-9]+]], !DIExpression(DIOpArg(0, i32)), [[META15:![0-9]+]])
+; CHECK-LABEL: define i32 @t1() {
; CHECK-NEXT: ret i32 2
;
  %local = alloca i32, align 4
@@ -21,17 +19,14 @@ define i32 @t1() !dbg !9 {

define i32 @t2(i1 %cond) !dbg !16 {
; CHECK-LABEL: define i32 @t2(
-; CHECK-SAME: i1 [[COND:%.*]]) !dbg [[DBG16:![0-9]+]] {
+; CHECK-SAME: i1 [[COND:%.*]]) {
; CHECK-NEXT: br i1 [[COND]], label [[THEN:%.*]], label [[ELSE:%.*]]
; CHECK: then:
-; CHECK-NEXT: #dbg_value(i32 42, [[META17:![0-9]+]], !DIExpression(DIOpArg(0, i32)), [[META18:![0-9]+]])
; CHECK-NEXT: br label [[JOIN:%.*]]
; CHECK: else:
-; CHECK-NEXT: #dbg_value(i32 2, [[META17]], !DIExpression(DIOpArg(0, i32)), [[META18]])
; CHECK-NEXT: br label [[JOIN]]
; CHECK: join:
; CHECK-NEXT: [[LOCAL_0:%.*]] = phi i32 [ 42, [[THEN]] ], [ 2, [[ELSE]] ]
-; CHECK-NEXT: #dbg_value(i32 [[LOCAL_0]], [[META17]], !DIExpression(DIOpArg(0, i32)), [[META18]])
; CHECK-NEXT: ret i32 [[LOCAL_0]]
;
  %local = alloca i32, align 4
@@ -52,10 +47,7 @@ join: ; preds = %else, %then
}

define void @t3() !dbg !19 {
-; CHECK-LABEL: define void @t3(
-; CHECK-SAME: ) !dbg [[DBG19:![0-9]+]] {
-; CHECK-NEXT: #dbg_value(i32 42, [[META20:![0-9]+]], !DIExpression(DIOpArg(0, i32), DIOpFragment(0, 32)), [[META25:![0-9]+]])
-; CHECK-NEXT: #dbg_value(i32 43, [[META20]], !DIExpression(DIOpArg(0, i32), DIOpFragment(32, 32)), [[META25]])
+; CHECK-LABEL: define void @t3() {
; CHECK-NEXT: ret void
;
  %local = alloca %struct.pair, align 4
@@ -70,9 +62,7 @@ define void @t3() !dbg !19 {

define i32 @t4() !dbg !26 {
;; FIXME(diexpression-poison): We could probably preserve debug info for the dbg.value here if
;; necessary. Check that we at least do something sensible with it for now.
-; CHECK-LABEL: define i32 @t4(
-; CHECK-SAME: ) !dbg [[DBG26:![0-9]+]] {
-; CHECK-NEXT: #dbg_value(ptr poison, [[META27:![0-9]+]], !DIExpression(DIOpArg(0, ptr addrspace(5)), DIOpDeref(i32)), [[META28:![0-9]+]])
+; CHECK-LABEL: define i32 @t4() {
; CHECK-NEXT: ret i32 42
;
  %local = alloca i32, align 4
@@ -87,17 +77,14 @@ define i16 @t5(i1 %cond) !dbg !29 {
;; of the variable !30. This is something that old-style DIExpressions don't
;; support.
; CHECK-LABEL: define i16 @t5(
-; CHECK-SAME: i1 [[COND:%.*]]) !dbg [[DBG29:![0-9]+]] {
+; CHECK-SAME: i1 [[COND:%.*]]) {
; CHECK-NEXT: br i1 [[COND]], label [[THEN:%.*]], label [[ELSE:%.*]]
; CHECK: then:
-; CHECK-NEXT: #dbg_value(i16 42, [[META30:![0-9]+]], !DIExpression(DIOpArg(0, i16), DIOpSExt(i32)), [[META31:![0-9]+]])
; CHECK-NEXT: br label [[JOIN:%.*]]
; CHECK: else:
-; CHECK-NEXT: #dbg_value(i16 43, [[META30]], !DIExpression(DIOpArg(0, i16), DIOpSExt(i32)), [[META31]])
; CHECK-NEXT: br label [[JOIN]]
; CHECK: join:
; CHECK-NEXT: [[LOCAL_0:%.*]] = phi i16 [ 42, [[THEN]] ], [ 43, [[ELSE]] ]
-; CHECK-NEXT: #dbg_value(i16 [[LOCAL_0]], [[META30]], !DIExpression(DIOpArg(0, i16), DIOpSExt(i32)), [[META31]])
; CHECK-NEXT: ret i16 [[LOCAL_0]]
;
  %local = alloca i16, align 4
@@ -120,12 +107,7 @@ join: ; preds = %else, %then

%struct.pair.pair = type { %struct.pair, %struct.pair }

define void @t6() !dbg !32 {
-; CHECK-LABEL: define void @t6(
-; CHECK-SAME: ) !dbg [[DBG32:![0-9]+]] {
-; CHECK-NEXT: #dbg_value(i32 0, [[META33:![0-9]+]], !DIExpression(DIOpArg(0, i32), DIOpFragment(0, 32)), [[META38:![0-9]+]])
-; CHECK-NEXT: #dbg_value(i32 1, [[META33]], !DIExpression(DIOpArg(0, i32), DIOpFragment(32, 32)), [[META38]])
-; CHECK-NEXT: #dbg_value(i32 2, [[META33]], !DIExpression(DIOpArg(0, i32), DIOpFragment(64, 32)), [[META38]])
-; CHECK-NEXT: #dbg_value(i32 3, [[META33]], !DIExpression(DIOpArg(0, i32), DIOpFragment(96, 32)), [[META38]])
+; CHECK-LABEL: define void @t6() {
; CHECK-NEXT: ret void
;
  %first = alloca %struct.pair, align 4
@@ -195,37 +177,3 @@ attributes #0 = { nocallback nofree nosync nounwind speculatable willreturn memo
!37 = !DILocalVariable(name: "local", scope: !32, file: !1, line: 1, type: !33)
!38 = !DILocation(line: 1, column: 1, scope: !32)

-;.
-; CHECK: [[META0:![0-9]+]] = distinct !DICompileUnit(language: DW_LANG_C_plus_plus_14, file: [[META1:![0-9]+]], producer: "clang 19", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, splitDebugInlining: false, nameTableKind: None)
-; CHECK: [[META1]] = !DIFile(filename: "{{.*}}t.cpp", directory: {{.*}})
-; CHECK: [[DBG9]] = distinct !DISubprogram(name: "t1", linkageName: "t1", scope: [[META1]], file: [[META1]], line: 7, type: [[META10:![0-9]+]], scopeLine: 7, flags: DIFlagPrototyped | DIFlagAllCallsDescribed, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: [[META0]], retainedNodes: [[META12:![0-9]+]])
-; CHECK: [[META10]] = !DISubroutineType(types: [[META11:![0-9]+]])
-; CHECK: [[META11]] = !{null}
-; CHECK: [[META12]] = !{[[META13]]}
-; CHECK: [[META13]] = !DILocalVariable(name: "local", scope: [[DBG9]], file: [[META1]], line: 8, type: [[META14:![0-9]+]])
-; CHECK: [[META14]] = !DIBasicType(name: "int", size: 32, encoding: DW_ATE_signed)
-; CHECK: [[META15]] = !DILocation(line: 0, scope: [[DBG9]])
-; CHECK: [[DBG16]] = distinct !DISubprogram(name: "t2", linkageName: "t2", scope: [[META1]], file: [[META1]], line: 7, type: [[META10]], scopeLine: 7, flags: DIFlagPrototyped | DIFlagAllCallsDescribed, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: [[META0]], retainedNodes: [[META12]])
-; CHECK: [[META17]] = !DILocalVariable(name: "local", scope: [[DBG16]], file: [[META1]], line: 1, type: [[META14]])
-; CHECK: [[META18]] = !DILocation(line: 0, scope: [[DBG16]])
-; CHECK: [[DBG19]] = distinct !DISubprogram(name: "t3", linkageName: "t3", scope: [[META1]], file: [[META1]], line: 7, type: [[META10]], scopeLine: 7, flags: DIFlagPrototyped | DIFlagAllCallsDescribed, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: [[META0]], retainedNodes: [[META12]])
-; CHECK: [[META20]] = !DILocalVariable(name: "local", scope: [[DBG19]], file: [[META1]], line: 1, type: [[META21:![0-9]+]])
-; CHECK: [[META21]] = distinct !DICompositeType(tag: DW_TAG_structure_type, name: "pair", file: [[META1]], line: 2, size: 64, flags: DIFlagTypePassByValue, elements: [[META22:![0-9]+]], identifier: "pair")
-; CHECK: [[META22]] = !{[[META23:![0-9]+]], [[META24:![0-9]+]]}
-; CHECK: [[META23]] = !DIDerivedType(tag: DW_TAG_member, name: "s1", scope: [[META21]], file: [[META1]], line: 3, baseType: [[META14]], size: 32)
-; CHECK: [[META24]] = !DIDerivedType(tag: DW_TAG_member, name: "s2", scope: [[META21]], file: [[META1]], line: 4, baseType: [[META14]], size: 32, offset: 32)
-; CHECK: [[META25]] = !DILocation(line: 0, scope: [[DBG19]])
-; CHECK: [[DBG26]] = distinct !DISubprogram(name: "t4", linkageName: "t4", scope: [[META1]], file: [[META1]], line: 7, type: [[META10]], scopeLine: 7, flags: DIFlagPrototyped | DIFlagAllCallsDescribed, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: [[META0]], retainedNodes: [[META12]])
-; CHECK: [[META27]] = !DILocalVariable(name: "local", scope: [[DBG26]], file: [[META1]], line: 1, type: [[META14]])
-; CHECK: [[META28]] = !DILocation(line: 1, column: 1, scope: [[DBG26]])
-; CHECK: [[DBG29]] = distinct !DISubprogram(name: "t5", linkageName: "t5", scope: [[META1]], file: [[META1]], line: 7, type: [[META10]], scopeLine: 7, flags: DIFlagPrototyped | DIFlagAllCallsDescribed, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: [[META0]], retainedNodes: [[META12]])
-; CHECK: [[META30]] = !DILocalVariable(name: "local_i16", scope: [[DBG29]], file: [[META1]], line: 1, type: [[META14]])
-; CHECK: [[META31]] = !DILocation(line: 0, scope: [[DBG29]])
-; CHECK: [[DBG32]] = distinct !DISubprogram(name: "t6", linkageName: "t56", scope: [[META1]], file: [[META1]], line: 7, type: [[META10]], scopeLine: 7, flags: DIFlagPrototyped | DIFlagAllCallsDescribed, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: [[META0]], retainedNodes: [[META12]])
-; CHECK: [[META33]] = !DILocalVariable(name: "local", scope: [[DBG32]], file: [[META1]], line: 1, type: [[META34:![0-9]+]])
-; CHECK: [[META34]] = distinct !DICompositeType(tag: DW_TAG_structure_type, name: "pair_pair", file: [[META1]], line: 2, size: 128, flags: DIFlagTypePassByValue, elements: [[META35:![0-9]+]], identifier: "pair_pair")
-; CHECK: [[META35]] = !{[[META36:![0-9]+]], [[META37:![0-9]+]]}
-; CHECK: [[META36]] = !DIDerivedType(tag: DW_TAG_member, name: "s1", scope: [[META34]], file: [[META1]], line: 3, baseType: [[META21]], size: 64)
-; CHECK: [[META37]] = !DIDerivedType(tag: DW_TAG_member, name: "s2", scope: [[META34]], file: [[META1]], line: 4, baseType: [[META21]], size: 64, offset: 64)
-; CHECK: [[META38]] = !DILocation(line: 0, scope: [[DBG32]])
-;.
diff --git a/llvm/test/Transforms/Util/DeclareRuntimeLibcalls/armpl.ll b/llvm/test/Transforms/Util/DeclareRuntimeLibcalls/armpl.ll
new file mode 100644
index 0000000000000..1d9cf6a5d77fe
--- /dev/null
+++ b/llvm/test/Transforms/Util/DeclareRuntimeLibcalls/armpl.ll
@@ -0,0 +1,21 @@
+; REQUIRES: aarch64-registered-target
+; RUN: opt -S -passes=declare-runtime-libcalls -mtriple=aarch64-unknown-linux -mattr=+neon,+sve -vector-library=ArmPL < %s | FileCheck %s
+
+; CHECK: declare void @armpl_svsincos_f32_x(<vscale x 4 x float>, ptr noalias nonnull writeonly align 16, ptr noalias nonnull writeonly align 16, <vscale x 4 x i1>) [[ATTRS:#[0-9]+]]
+
+; CHECK: declare void @armpl_svsincos_f64_x(<vscale x 2 x double>, ptr noalias nonnull writeonly align 16, ptr noalias nonnull writeonly align 16, <vscale x 2 x i1>) [[ATTRS]]
+
+; CHECK: declare void @armpl_svsincospi_f32_x(<vscale x 4 x float>, ptr noalias nonnull writeonly align 16, ptr noalias nonnull writeonly align 16, <vscale x 4 x i1>) [[ATTRS]]
+
+; CHECK: declare void @armpl_svsincospi_f64_x(<vscale x 2 x double>, ptr noalias nonnull writeonly align 16, ptr noalias nonnull writeonly align 16, <vscale x 2 x i1>) [[ATTRS]]
+
+; CHECK: declare void @armpl_vsincospiq_f32(<4 x float>, ptr noalias nonnull writeonly align 16, ptr noalias nonnull writeonly align 16) [[ATTRS]]
+
+; CHECK: declare void @armpl_vsincospiq_f64(<2 x double>, ptr noalias nonnull writeonly align 16, ptr noalias nonnull writeonly align 16) [[ATTRS]]
+
+; CHECK: declare aarch64_vector_pcs void @armpl_vsincosq_f32(<4 x float>, ptr noalias nonnull writeonly align 16, ptr noalias nonnull writeonly align 16) [[ATTRS]]
+
+; CHECK: declare aarch64_vector_pcs void @armpl_vsincosq_f64(<2 x double>, ptr noalias nonnull writeonly align 16, ptr noalias nonnull writeonly align 16) [[ATTRS]]
+
+
+; CHECK: attributes [[ATTRS]] = { nocallback nofree nosync nounwind willreturn memory(argmem: write) }
diff --git a/llvm/test/Transforms/Util/DeclareRuntimeLibcalls/sleef.ll b/llvm/test/Transforms/Util/DeclareRuntimeLibcalls/sleef.ll
new file mode 100644
index 0000000000000..2c6900761b1c0
--- /dev/null
+++ b/llvm/test/Transforms/Util/DeclareRuntimeLibcalls/sleef.ll
@@ -0,0 +1,20 @@
+; REQUIRES: aarch64-registered-target
+; RUN: opt -S -passes=declare-runtime-libcalls -mtriple=aarch64-unknown-linux -mattr=+neon,+sve -vector-library=sleefgnuabi < %s | FileCheck %s
+
+; CHECK: declare void @_ZGVnN2vl8l8_sincos(<2 x double>, ptr noalias nonnull writeonly align 16, ptr noalias nonnull writeonly align 16) [[ATTRS:#[0-9]+]]
+
+; CHECK: declare void @_ZGVnN2vl8l8_sincospi(<2 x double>, ptr noalias nonnull writeonly align 16, ptr noalias nonnull writeonly align 16) [[ATTRS]]
+
+; CHECK: declare void @_ZGVnN4vl4l4_sincosf(<4 x float>, ptr noalias nonnull writeonly align 16, ptr noalias nonnull writeonly align 16) [[ATTRS]]
+
+; CHECK: declare void @_ZGVnN4vl4l4_sincospif(<4 x float>, ptr noalias nonnull writeonly align 16, ptr noalias nonnull writeonly align 16) [[ATTRS]]
+
+; CHECK: declare void @_ZGVsNxvl4l4_sincosf(<vscale x 4 x float>, ptr noalias nonnull writeonly align 16, ptr noalias nonnull writeonly align 16) [[ATTRS]]
+
+; CHECK: declare void @_ZGVsNxvl4l4_sincospif(<vscale x 4 x float>, ptr noalias nonnull writeonly align 16, ptr noalias nonnull writeonly align 16) [[ATTRS]]
+
+; CHECK: declare void @_ZGVsNxvl8l8_sincos(<vscale x 2 x double>, ptr noalias nonnull writeonly align 16, ptr noalias nonnull writeonly align 16) [[ATTRS]]
+
+; CHECK: declare void @_ZGVsNxvl8l8_sincospi(<vscale x 2 x double>, ptr noalias nonnull writeonly align 16, ptr noalias nonnull writeonly align 16) [[ATTRS]]
+
+; CHECK: attributes [[ATTRS]] = { nocallback nofree nosync nounwind willreturn memory(argmem: write) }
diff --git a/llvm/test/Transforms/Util/annotation-remarks-dbg-info.ll b/llvm/test/Transforms/Util/annotation-remarks-dbg-info.ll
index a0fa79aa7edbe..7fc72077ee5b3 100644
--- a/llvm/test/Transforms/Util/annotation-remarks-dbg-info.ll
+++ b/llvm/test/Transforms/Util/annotation-remarks-dbg-info.ll
@@ -72,5 +72,5 @@ entry:
!14 = !{!15}
!15 = !DILocalVariable(name: "a", arg: 1, scope: !7, file: !1, line: 1, type: !10)
!16 = !DILocation(line: 400, column: 3, scope: !7)
-!17 = distinct !DISubprogram(name: "test2", scope: !1, file: !1, line: 21, type: !8, scopeLine: 20, flags: DIFlagPrototyped, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: !0, retainedNodes: !14)
+!17 = distinct !DISubprogram(name: "test2", scope: !1, file: !1, line: 21, type: !8, scopeLine: 20, flags: DIFlagPrototyped, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: !0, retainedNodes: !2)
!18 = !DILocation(line: 200, column: 3, scope: !17)
diff --git a/llvm/tools/llvm-exegesis/lib/X86/Target.cpp b/llvm/tools/llvm-exegesis/lib/X86/Target.cpp
index 6dc647655d92f..77b2f491f6a72 100644
--- a/llvm/tools/llvm-exegesis/lib/X86/Target.cpp
+++ b/llvm/tools/llvm-exegesis/lib/X86/Target.cpp
@@ -28,7 +28,6 @@
#include "llvm/TargetParser/Host.h"

#include
-#include
#include
#if defined(_MSC_VER) && (defined(_M_IX86) || defined(_M_X64)) && \
    !defined(_M_ARM64EC)
diff --git a/llvm/tools/llvm-gpu-loader/amdhsa.cpp b/llvm/tools/llvm-gpu-loader/amdhsa.cpp
index 5715058d8cfac..fa9ee185549a5 100644
--- a/llvm/tools/llvm-gpu-loader/amdhsa.cpp
+++ b/llvm/tools/llvm-gpu-loader/amdhsa.cpp
@@ -26,7 +26,6 @@
#include
#include
#include
-#include
#include

// The implicit arguments of COV5 AMDGPU kernels.
diff --git a/llvm/tools/llvm-mc/Disassembler.h b/llvm/tools/llvm-mc/Disassembler.h
index dd8525d73200e..76cee9e84c312 100644
--- a/llvm/tools/llvm-mc/Disassembler.h
+++ b/llvm/tools/llvm-mc/Disassembler.h
@@ -14,8 +14,6 @@
#ifndef LLVM_TOOLS_LLVM_MC_DISASSEMBLER_H
#define LLVM_TOOLS_LLVM_MC_DISASSEMBLER_H

-#include
-
namespace llvm {

class MemoryBuffer;
diff --git a/llvm/tools/llvm-xray/trie-node.h b/llvm/tools/llvm-xray/trie-node.h
index b42b0293620dd..f96be592fc364 100644
--- a/llvm/tools/llvm-xray/trie-node.h
+++ b/llvm/tools/llvm-xray/trie-node.h
@@ -15,7 +15,6 @@
#define LLVM_TOOLS_LLVM_XRAY_STACK_TRIE_H

#include
-#include

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
diff --git a/llvm/unittests/ADT/STLForwardCompatTest.cpp b/llvm/unittests/ADT/STLForwardCompatTest.cpp
index c6ae6e36cfbff..d0092fdb52b01 100644
--- a/llvm/unittests/ADT/STLForwardCompatTest.cpp
+++ b/llvm/unittests/ADT/STLForwardCompatTest.cpp
@@ -11,7 +11,6 @@
#include "gtest/gtest.h"

#include
-#include
#include
#include
diff --git a/llvm/unittests/ADT/SequenceTest.cpp b/llvm/unittests/ADT/SequenceTest.cpp
index 7aa39568888b2..ab50ad0bb5606 100644
--- a/llvm/unittests/ADT/SequenceTest.cpp
+++ b/llvm/unittests/ADT/SequenceTest.cpp
@@ -11,7 +11,9 @@
#include "gmock/gmock.h"
#include "gtest/gtest.h"

-#include
+#include
+#include
+#include

using namespace llvm;
diff --git a/llvm/unittests/ExecutionEngine/Orc/LibraryResolverTest.cpp b/llvm/unittests/ExecutionEngine/Orc/LibraryResolverTest.cpp
index b40b61e7a4a6f..441344b281411 100644
--- a/llvm/unittests/ExecutionEngine/Orc/LibraryResolverTest.cpp
+++ b/llvm/unittests/ExecutionEngine/Orc/LibraryResolverTest.cpp
@@ -23,7 +23,6 @@
#include "gtest/gtest.h"

#include
-#include
#include
#include
diff --git a/llvm/unittests/IR/VFABIDemanglerTest.cpp b/llvm/unittests/IR/VFABIDemanglerTest.cpp
index e30e0f865f719..7d946131d4cba 100644
--- a/llvm/unittests/IR/VFABIDemanglerTest.cpp
+++ b/llvm/unittests/IR/VFABIDemanglerTest.cpp
@@ -15,7 +15,6 @@
#include "llvm/IR/Module.h"
#include "llvm/Support/SourceMgr.h"
#include "gtest/gtest.h"
-#include

using namespace llvm;
diff --git a/llvm/unittests/Support/SpecialCaseListTest.cpp b/llvm/unittests/Support/SpecialCaseListTest.cpp
index 750fedaf0a436..812e0d3d8520c 100644
--- a/llvm/unittests/Support/SpecialCaseListTest.cpp
+++ b/llvm/unittests/Support/SpecialCaseListTest.cpp
@@ -308,43 +308,49 @@ TEST_F(SpecialCaseListTest, Version2) {
}

TEST_F(SpecialCaseListTest, DotSlash) {
-  std::unique_ptr<SpecialCaseList> SCL2 = makeSpecialCaseList("[dot]\n"
-                                                              "fun:./foo\n"
-                                                              "src:./bar\n"
-                                                              "[not]\n"
-                                                              "fun:foo\n"
-                                                              "src:bar\n");
-  std::unique_ptr<SpecialCaseList> SCL3 = makeSpecialCaseList("[dot]\n"
-                                                              "fun:./foo\n"
-                                                              "src:./bar\n"
-                                                              "[not]\n"
-                                                              "fun:foo\n"
-                                                              "src:bar\n",
-                                                              /*Version=*/3);
+  StringRef IgnoreList = "[dot]\n"
+                         "fun:./foo\n"
+                         "src:./bar\n"
+                         "[not]\n"
+                         "fun:foo\n"
+                         "src:bar\n";
+  std::unique_ptr<SpecialCaseList> SCL2 = makeSpecialCaseList(IgnoreList);
+  std::unique_ptr<SpecialCaseList> SCL3 =
+      makeSpecialCaseList(IgnoreList, /*Version=*/3);
+  std::unique_ptr<SpecialCaseList> SCL4 = makeSpecialCaseList(IgnoreList,
+                                                              /*Version=*/4);

  EXPECT_TRUE(SCL2->inSection("dot", "fun", "./foo"));
  EXPECT_TRUE(SCL3->inSection("dot", "fun", "./foo"));
+  EXPECT_TRUE(SCL4->inSection("dot", "fun", "./foo"));
  EXPECT_FALSE(SCL2->inSection("dot", "fun", "foo"));
  EXPECT_FALSE(SCL3->inSection("dot", "fun", "foo"));
+  EXPECT_FALSE(SCL4->inSection("dot", "fun", "foo"));

  EXPECT_TRUE(SCL2->inSection("dot", "src", "./bar"));
  EXPECT_FALSE(SCL3->inSection("dot", "src", "./bar"));
+  EXPECT_FALSE(SCL4->inSection("dot", "src", "./bar"));
  EXPECT_FALSE(SCL2->inSection("dot", "src", "bar"));
  EXPECT_FALSE(SCL3->inSection("dot", "src", "bar"));
+  EXPECT_FALSE(SCL4->inSection("dot", "src", "bar"));

  EXPECT_FALSE(SCL2->inSection("not", "fun", "./foo"));
  EXPECT_FALSE(SCL3->inSection("not", "fun", "./foo"));
+  EXPECT_FALSE(SCL4->inSection("not", "fun", "./foo"));
  EXPECT_TRUE(SCL2->inSection("not", "fun", "foo"));
  EXPECT_TRUE(SCL3->inSection("not", "fun", "foo"));
+  EXPECT_TRUE(SCL4->inSection("not", "fun", "foo"));

  EXPECT_FALSE(SCL2->inSection("not", "src", "./bar"));
  EXPECT_TRUE(SCL3->inSection("not", "src", "./bar"));
+  EXPECT_TRUE(SCL4->inSection("not", "src", "./bar"));
  EXPECT_TRUE(SCL2->inSection("not", "src", "bar"));
  EXPECT_TRUE(SCL3->inSection("not", "src", "bar"));
+  EXPECT_TRUE(SCL4->inSection("not", "src", "bar"));
}

TEST_F(SpecialCaseListTest, LinesInSection) {
"__configuration/compiler.h", + "__configuration/experimental.h", + "__configuration/hardening.h", "__configuration/language.h", "__configuration/platform.h", "__coroutine/coroutine_handle.h", diff --git a/llvm/utils/gn/secondary/llvm/lib/Analysis/BUILD.gn b/llvm/utils/gn/secondary/llvm/lib/Analysis/BUILD.gn index b7c471058815a..73850044814ae 100644 --- a/llvm/utils/gn/secondary/llvm/lib/Analysis/BUILD.gn +++ b/llvm/utils/gn/secondary/llvm/lib/Analysis/BUILD.gn @@ -67,7 +67,6 @@ static_library("Analysis") { "InlineAdvisor.cpp", "InlineCost.cpp", "InlineOrder.cpp", - "InlineSizeEstimatorAnalysis.cpp", "InstCount.cpp", "InstructionPrecedenceTracking.cpp", "InstructionSimplify.cpp", diff --git a/llvm/utils/gn/secondary/llvm/lib/IR/BUILD.gn b/llvm/utils/gn/secondary/llvm/lib/IR/BUILD.gn index 22aa0b6418132..8037c8d693cb8 100644 --- a/llvm/utils/gn/secondary/llvm/lib/IR/BUILD.gn +++ b/llvm/utils/gn/secondary/llvm/lib/IR/BUILD.gn @@ -82,6 +82,7 @@ static_library("IR") { "SafepointIRVerifier.cpp", "Statepoint.cpp", "StructuralHash.cpp", + "SystemLibraries.cpp", "Type.cpp", "TypeFinder.cpp", "TypedPointerType.cpp", diff --git a/mlir/include/mlir/Dialect/Bufferization/IR/BufferizationOps.td b/mlir/include/mlir/Dialect/Bufferization/IR/BufferizationOps.td index 6724d4c483101..a9b2b9f39519d 100644 --- a/mlir/include/mlir/Dialect/Bufferization/IR/BufferizationOps.td +++ b/mlir/include/mlir/Dialect/Bufferization/IR/BufferizationOps.td @@ -28,7 +28,8 @@ class Bufferization_Op traits = []> def Bufferization_AllocTensorOp : Bufferization_Op<"alloc_tensor", [AttrSizedOperandSegments, BufferizableOpInterface, - DeclareOpInterfaceMethods]> { + DeclareOpInterfaceMethods]> { let summary = "allocate buffer for a tensor"; let description = [{ @@ -219,7 +220,8 @@ def Bufferization_MaterializeInDestinationOp : Bufferization_Op<"materialize_in_destination", [AllElementTypesMatch<["source", "dest"]>, BufferizableOpInterface, DestinationStyleOpInterface, - DeclareOpInterfaceMethods, + DeclareOpInterfaceMethods, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods { let assemblyFormat = "attr-dict"; } +def ROCDLGlobalBuffer : LLVM_PointerInAddressSpace<1>; def ROCDLBufferLDS : LLVM_PointerInAddressSpace<3>; def ROCDL_BarrierInitOp : ROCDL_IntrOp<"s.barrier.init", [], [], [], 0, 0, 0, 0, [1], ["id"]>, @@ -631,8 +632,6 @@ def ROCDL_wmma_i32_16x16x64_iu8 : ROCDL_Wmma_IntrOp<"wmma.i32.16x16x64.iu8", [1] //===---------------------------------------------------------------------===// // LDS transpose intrinsics (available in GFX950) -def ROCDLGlobalBuffer : LLVM_PointerInAddressSpace<1>; - class ROCDL_LDS_Read_Tr_IntrOp : ROCDL_IntrOp { dag args = (ins Arg:$ptr); @@ -650,6 +649,58 @@ def ROCDL_ds_read_tr8_b64 : ROCDL_LDS_Read_Tr_IntrOp<"ds.read.tr8.b64">; def ROCDL_ds_read_tr6_b96 : ROCDL_LDS_Read_Tr_IntrOp<"ds.read.tr6.b96">; def ROCDL_ds_read_tr16_b64 : ROCDL_LDS_Read_Tr_IntrOp<"ds.read.tr16.b64">; + + +//===---------------------------------------------------------------------===// +// Glb/DS load-transpose intrinsics (available in GFX1250+) + +class AddrKind { + string name = n; + int space = s; +} +def GlobalAddrKind : AddrKind<"global", 1>; +def DSAddrKind : AddrKind<"ds", 3>; + +class ROCDL_TrLoadOpMeta { + AddrKind addrKind = kind; + string inBits = !cast(inElemBits); + string outBits = !cast(outElemBits); + string inBitsEnc = !if(!eq(addrKind.space, 1), + !if(!or(!eq(inElemBits, 8), !eq(inElemBits, 16)), "", inBits), inBits); + string mnemonic = addrKind.name # ".load.tr" # inBitsEnc # ".b" # outBits; +} + +class 
ROCDL_TrLoadOp : + ROCDL_IntrOp { + + dag args = (ins Arg, "", [MemRead]>:$ptr); + let arguments = !con(args, baseArgs); + let summary = "Loads and transposes a matrix from " # meta.addrKind.name # " memory to registers (available in gfx1250+)."; + let description = [{ + Load a matrix of }] # meta.inBits # [{-bit data from the }] # meta.addrKind.name # [{ memory, + transpose data between row-major and column-major order, + and store the result into a }] # meta.outBits # [{-bit vector register. + + Available in gfx1250+. + }]; + let assemblyFormat = "$ptr attr-dict `:` qualified(type($ptr)) `->` type($res)"; + let extraClassDefinition = [{ + ::llvm::SmallVector<::mlir::Value> $cppClass::getAccessedOperands() { + return {getPtr()}; + } + }]; +} + +def ROCDL_GlobalLoadTr4_B64 : ROCDL_TrLoadOp>; +def ROCDL_GlobalLoadTr8_B64 : ROCDL_TrLoadOp>; +def ROCDL_GlobalLoadTr6_B96 : ROCDL_TrLoadOp>; +def ROCDL_GlobalLoadTr8_B128 : ROCDL_TrLoadOp>; + +def ROCDL_DsLoadTr4_B64 : ROCDL_TrLoadOp>; +def ROCDL_DsLoadTr8_B64 : ROCDL_TrLoadOp>; +def ROCDL_DsLoadTr6_B96 : ROCDL_TrLoadOp>; +def ROCDL_DsLoadTr16_B128 : ROCDL_TrLoadOp>; + //===---------------------------------------------------------------------===// // Load to LDS intrinsic (available in GFX9 and GFX10) //===---------------------------------------------------------------------===// diff --git a/mlir/include/mlir/Dialect/Linalg/IR/LinalgOps.td b/mlir/include/mlir/Dialect/Linalg/IR/LinalgOps.td index 7ff44c2e1d2ed..2754ee3b4f586 100644 --- a/mlir/include/mlir/Dialect/Linalg/IR/LinalgOps.td +++ b/mlir/include/mlir/Dialect/Linalg/IR/LinalgOps.td @@ -94,7 +94,8 @@ def Linalg_IndexOp : Linalg_Op<"index", [Pure]>, def Linalg_SoftmaxOp : Linalg_Op<"softmax", [DestinationStyleOpInterface, PredOpTrait<"input and output have same element type", TCopVTEtIsSameAs<0, 1>>, - DeclareOpInterfaceMethods, + DeclareOpInterfaceMethods, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods traits = []> : DeclareOpInterfaceMethods, DestinationStyleOpInterface, LinalgRelayoutOpInterface, ConditionallySpeculatable, NoMemoryEffect, - DeclareOpInterfaceMethods, + DeclareOpInterfaceMethods, TypesMatchWith<"result type matches type of dest", "dest", "result", "$_self">])> { diff --git a/mlir/include/mlir/Dialect/MemRef/IR/MemRefOps.td b/mlir/include/mlir/Dialect/MemRef/IR/MemRefOps.td index 8965302a58c5d..0bf22928f6900 100644 --- a/mlir/include/mlir/Dialect/MemRef/IR/MemRefOps.td +++ b/mlir/include/mlir/Dialect/MemRef/IR/MemRefOps.td @@ -1783,7 +1783,8 @@ class MemRef_ReassociativeReshapeOp traits = []> : def MemRef_ExpandShapeOp : MemRef_ReassociativeReshapeOp<"expand_shape", [ DeclareOpInterfaceMethods, DeclareOpInterfaceMethods, - DeclareOpInterfaceMethods]> { + DeclareOpInterfaceMethods]> { let summary = "operation to produce a memref with a higher rank."; let description = [{ The `memref.expand_shape` op produces a new view with a higher rank whose diff --git a/mlir/include/mlir/Dialect/MemRef/Transforms/Passes.td b/mlir/include/mlir/Dialect/MemRef/Transforms/Passes.td index f3e40aaa29075..c403386bd214a 100644 --- a/mlir/include/mlir/Dialect/MemRef/Transforms/Passes.td +++ b/mlir/include/mlir/Dialect/MemRef/Transforms/Passes.td @@ -164,6 +164,11 @@ def ResolveRankedShapeTypeResultDimsPass implement the `ReifyRankedShapedTypeOpInterface` in terms of shapes of its operands. 
}]; + let options = [ + Option<"errorOnPatternIterationLimit", "error-on-pattern-iteration-limit", "bool", + /*default=*/"true", + "Throw an error when pattern rewriter hits iteration limit">, + ]; let dependentDialects = [ "memref::MemRefDialect", "tensor::TensorDialect" ]; @@ -177,6 +182,11 @@ def ResolveShapedTypeResultDimsPass : Pass<"resolve-shaped-type-result-dims"> { `ReifyRankedShapedTypeOpInterface` in terms of shapes of its operands. }]; + let options = [ + Option<"errorOnPatternIterationLimit", "error-on-pattern-iteration-limit", "bool", + /*default=*/"true", + "Throw an error when pattern rewriter hits iteration limit">, + ]; let dependentDialects = [ "affine::AffineDialect", "memref::MemRefDialect", "tensor::TensorDialect" ]; diff --git a/mlir/include/mlir/Dialect/Tensor/IR/TensorOps.td b/mlir/include/mlir/Dialect/Tensor/IR/TensorOps.td index 2453cf5b5b5a4..3e93e58575e65 100644 --- a/mlir/include/mlir/Dialect/Tensor/IR/TensorOps.td +++ b/mlir/include/mlir/Dialect/Tensor/IR/TensorOps.td @@ -131,7 +131,9 @@ def Tensor_CastOp : Tensor_Op<"cast", [ def Tensor_ConcatOp : Tensor_Op<"concat", [Pure, DeclareOpInterfaceMethods, - DeclareOpInterfaceMethods]> { + DeclareOpInterfaceMethods, + ]> { let summary = "tensor concatenation operation"; let description = [{ The "concat" operation constructs a tensor out of a variadic list of input @@ -261,7 +263,8 @@ def Tensor_DimOp : Tensor_Op<"dim", [ def Tensor_EmptyOp : Tensor_Op<"empty", [Pure, - DeclareOpInterfaceMethods]> { + DeclareOpInterfaceMethods]> { let summary = "empty tensor operation"; let description = [{ @@ -358,7 +361,8 @@ def Tensor_ExtractOp : Tensor_Op<"extract", [ def Tensor_ExtractSliceOp : Tensor_OpWithOffsetSizesAndStrides<"extract_slice", [ DeclareOpInterfaceMethods, - DeclareOpInterfaceMethods, + DeclareOpInterfaceMethods, AttrSizedOperandSegments, Pure, OffsetSizeAndStrideOpInterface @@ -740,7 +744,8 @@ def Tensor_GatherOp : Tensor_Op<"gather", [ def Tensor_GenerateOp : Tensor_Op<"generate", [ DeclareOpInterfaceMethods, RecursiveMemoryEffects, - DeclareOpInterfaceMethods, + DeclareOpInterfaceMethods, SingleBlockImplicitTerminator<"mlir::tensor::YieldOp">]> { let summary = "Creates a dynamically sized tensor from elements"; let description = [{ @@ -835,7 +840,8 @@ def Tensor_InsertOp : Tensor_Op<"insert", [ def Tensor_InsertSliceOp : Tensor_OpWithOffsetSizesAndStrides<"insert_slice", [ DeclareOpInterfaceMethods, - DeclareOpInterfaceMethods, + DeclareOpInterfaceMethods, AttrSizedOperandSegments, DestinationStyleOpInterface, Pure, @@ -1256,7 +1262,8 @@ def Tensor_CollapseShapeOp : Tensor_ReassociativeReshapeOp<"collapse_shape"> { def Tensor_PadOp : Tensor_Op<"pad", [ DeclareOpInterfaceMethods, - DeclareOpInterfaceMethods, + DeclareOpInterfaceMethods, AttrSizedOperandSegments, Pure, SingleBlockImplicitTerminator<"mlir::tensor::YieldOp">]> { @@ -1764,7 +1771,8 @@ def Tensor_ScatterOp : Tensor_Op<"scatter", [ def Tensor_SplatOp : Tensor_Op<"splat", [ DeclareOpInterfaceMethods, - DeclareOpInterfaceMethods, + DeclareOpInterfaceMethods, Pure, TypesMatchWith<"operand type matches element type of result", "aggregate", "input", diff --git a/mlir/include/mlir/Dialect/Tosa/IR/TosaOps.td b/mlir/include/mlir/Dialect/Tosa/IR/TosaOps.td index 467dba3232f2b..31d1e80f2772c 100644 --- a/mlir/include/mlir/Dialect/Tosa/IR/TosaOps.td +++ b/mlir/include/mlir/Dialect/Tosa/IR/TosaOps.td @@ -2219,7 +2219,8 @@ def Tosa_TileOp : Tosa_InferShapedTypeOp<"tile"> { // Operator: transpose 
//===----------------------------------------------------------------------===// def Tosa_TransposeOp : Tosa_InferShapedTypeOp<"transpose", - [DeclareOpInterfaceMethods, + [DeclareOpInterfaceMethods, AllElementTypesMatch<["input1", "output"]>]> { let summary = "Transpose operator."; diff --git a/mlir/include/mlir/Dialect/XeGPU/IR/XeGPUAttrs.td b/mlir/include/mlir/Dialect/XeGPU/IR/XeGPUAttrs.td index 9c35c07a7e587..3f27d690f949b 100644 --- a/mlir/include/mlir/Dialect/XeGPU/IR/XeGPUAttrs.td +++ b/mlir/include/mlir/Dialect/XeGPU/IR/XeGPUAttrs.td @@ -379,6 +379,17 @@ def XeGPU_LayoutAttr : XeGPUAttr<"Layout", "layout", [DistributeLayoutAttr]> { ); let builders = [ + AttrBuilder<(ins "llvm::ArrayRef": $inst_data), + [{ + auto sg_layout = DenseI32ArrayAttr(); + auto sg_data = DenseI32ArrayAttr(); + auto order = DenseI32ArrayAttr(); + auto lane_layout = DenseI32ArrayAttr(); + auto lane_data = DenseI32ArrayAttr(); + return $_get($_ctxt, sg_layout, sg_data, + DenseI32ArrayAttr::get($_ctxt, inst_data), + lane_layout, lane_data, order); + }]>, AttrBuilder<(ins "llvm::ArrayRef": $inst_data, "llvm::ArrayRef": $lane_layout, "llvm::ArrayRef": $lane_data), diff --git a/mlir/include/mlir/Dialect/XeGPU/TransformOps/XeGPUTransformOps.td b/mlir/include/mlir/Dialect/XeGPU/TransformOps/XeGPUTransformOps.td index b985d5450be0e..34f333e556deb 100644 --- a/mlir/include/mlir/Dialect/XeGPU/TransformOps/XeGPUTransformOps.td +++ b/mlir/include/mlir/Dialect/XeGPU/TransformOps/XeGPUTransformOps.td @@ -16,6 +16,24 @@ include "mlir/Dialect/Transform/IR/TransformTypes.td" include "mlir/Interfaces/SideEffectInterfaces.td" include "mlir/IR/OpBase.td" +def GetDescOp : Op, + NavigationTransformOpTrait, MemoryEffectsOpInterface +]> { + + let summary = "Get a handle to the descriptor op of a value."; + let description = [{ + Traces the producers of the given value until an `xegpu.create_nd_tdesc` + descriptor op is found. Returns a handle to it. Currently traces + producers by following only the first operand of producer ops. + }]; + + let arguments = (ins TransformValueHandleTypeInterface:$target); + + let results = (outs TransformHandleTypeInterface:$descHandle); + let assemblyFormat = "$target attr-dict `:` functional-type(operands, results)"; +} + def SetDescLayoutOp : Op, @@ -31,16 +49,16 @@ def SetDescLayoutOp : Op : $sg_layout, - Variadic : $sg_data, - Variadic : $inst_data, + TransformHandleTypeInterface:$target, + Variadic:$sg_layout, + Variadic:$sg_data, + Variadic:$inst_data, DefaultValuedOptionalAttr:$static_sg_layout, DefaultValuedOptionalAttr:$static_sg_data, DefaultValuedOptionalAttr:$static_inst_data ); - let results = (outs TransformHandleTypeInterface : $transformed); + let results = (outs TransformHandleTypeInterface:$transformed); let builders = [ OpBuilder<(ins "Value":$target, "ArrayRef":$mixedSgLayout, @@ -78,4 +96,69 @@ def SetDescLayoutOp : Op, + TransformOpInterface +]> { + + let summary = "Set xegpu.layout attribute of an op."; + let description = [{ + Sets the `xegpu.layout` attribute of an op. If `result=true`, sets the + `layout_result_{index}`, otherwise `layout_operand_{index}` attribute. The + target operand/result value is defined by the `index` argument. The layout + is defined by the `sg_layout`, `sg_data` and optional `inst_data` attributes. 
+  }];
+
+  let arguments = (ins TransformHandleTypeInterface:$target,
+                   DefaultValuedOptionalAttr:$index,
+                   Variadic<Index>:$sg_layout,
+                   Variadic<Index>:$sg_data,
+                   Variadic<Index>:$inst_data,
+                   DefaultValuedOptionalAttr:$static_sg_layout,
+                   DefaultValuedOptionalAttr:$static_sg_data,
+                   DefaultValuedOptionalAttr:$static_inst_data,
+                   DefaultValuedAttr:$result
+  );
+
+  let results = (outs);
+  let builders = [
+    OpBuilder<(ins "Value":$target,
+                   "int64_t":$index,
+                   "ArrayRef<OpFoldResult>":$mixedSgLayout,
+                   "ArrayRef<OpFoldResult>":$mixedSgData,
+                   "ArrayRef<OpFoldResult>":$mixedInstData,
+                   CArg<"bool", "false">:$result
+    )>,
+  ];
+
+  let assemblyFormat = [{
+    $target (`result` $result^)? (`index` `=` $index^)?
+    `sg_layout` `=` custom<DynamicIndexList>($sg_layout, $static_sg_layout)
+    `sg_data` `=` custom<DynamicIndexList>($sg_data, $static_sg_data)
+    (`inst_data` `=` custom<DynamicIndexList>($inst_data, $static_inst_data)^)?
+    attr-dict `:` qualified(type(operands))
+  }];
+
+  let extraClassDeclaration = [{
+    ::mlir::DiagnosedSilenceableFailure apply(
+        ::mlir::transform::TransformRewriter &rewriter,
+        ::mlir::transform::TransformResults &transformResults,
+        ::mlir::transform::TransformState &state);
+
+    ::llvm::SmallVector<::mlir::OpFoldResult> getMixedSgLayout() {
+      Builder b(getContext());
+      return getMixedValues(getStaticSgLayout(), getSgLayout(), b);
+    }
+    ::llvm::SmallVector<::mlir::OpFoldResult> getMixedSgData() {
+      Builder b(getContext());
+      return getMixedValues(getStaticSgData(), getSgData(), b);
+    }
+    ::llvm::SmallVector<::mlir::OpFoldResult> getMixedInstData() {
+      Builder b(getContext());
+      return getMixedValues(getStaticInstData(), getInstData(), b);
+    }
+  }];
+}
+
 #endif // XEGPU_TRANSFORM_OPS
diff --git a/mlir/include/mlir/Dialect/XeGPU/Transforms/Passes.td b/mlir/include/mlir/Dialect/XeGPU/Transforms/Passes.td
index e42799689e490..12270af870b3b 100644
--- a/mlir/include/mlir/Dialect/XeGPU/Transforms/Passes.td
+++ b/mlir/include/mlir/Dialect/XeGPU/Transforms/Passes.td
@@ -47,7 +47,7 @@ def XeGPUPropagateLayout : Pass<"xegpu-propagate-layout"> {
     Option<
         "layoutKind", "layout-kind", "std::string",
         /*default=*/"\"lane\"",
-        "Propagate a `sg` / `inst` / `lane` level of xegpu layouts.">
+        "Propagate `inst` / `lane` level of xegpu layouts.">
   ];
 }
diff --git a/mlir/include/mlir/Interfaces/InferTypeOpInterface.h b/mlir/include/mlir/Interfaces/InferTypeOpInterface.h
index 4fcbeff9df560..1bfb66e681d8d 100644
--- a/mlir/include/mlir/Interfaces/InferTypeOpInterface.h
+++ b/mlir/include/mlir/Interfaces/InferTypeOpInterface.h
@@ -33,6 +33,10 @@ using ReifiedRankedShapedTypeDims = SmallVector<SmallVector<OpFoldResult>>;
 LogicalResult
 reifyResultShapes(OpBuilder &b, Operation *op,
                   ReifiedRankedShapedTypeDims &reifiedReturnShapes);
+FailureOr<SmallVector<OpFoldResult>>
+reifyShapeOfResult(OpBuilder &b, Operation *op, int resultIndex);
+FailureOr<OpFoldResult> reifyDimOfResult(OpBuilder &b, Operation *op,
+                                         int resultIndex, int dim);
 
 /// Adaptor class to abstract the differences between whether value is from
 /// a ShapedType or ShapedTypeComponents or DenseIntElementsAttribute.
diff --git a/mlir/include/mlir/Interfaces/InferTypeOpInterface.td b/mlir/include/mlir/Interfaces/InferTypeOpInterface.td
index 1a2c05fc16ed5..67568f731f597 100644
--- a/mlir/include/mlir/Interfaces/InferTypeOpInterface.td
+++ b/mlir/include/mlir/Interfaces/InferTypeOpInterface.td
@@ -361,20 +361,76 @@ def ReifyRankedShapedTypeOpInterface :
   let methods = [
     InterfaceMethod<
       /*desc=*/[{
-        Reify the shape of the result of an operation (typically in terms of the
-        shape of its operands).
+        Reify the shapes of all the results of an operation (typically in terms
+        of the shape of its operands).
 
         `reifiedReturnShapes` is populated with one vector per op result. Each
         of those vectors contains an OpFoldResult for each dimension of the
         shaped type. The given builder may be used to insert ops that compute
         result shapes.
 
-        If the shape of a particular result cannot be computed it must be empty.
+        If the shape of a particular result cannot be computed in terms of
+        its operands, it must be left empty. If any dimension of the result
+        cannot be computed, it must be set to OpFoldResult().
       }],
       /*retTy=*/"::llvm::LogicalResult",
       /*methodName=*/"reifyResultShapes",
       /*args=*/(ins "::mlir::OpBuilder &":$builder,
-        "::mlir::ReifiedRankedShapedTypeDims &":$reifiedReturnShapes)
+        "::mlir::ReifiedRankedShapedTypeDims &":$reifiedReturnShapes),
+      /*methodBody=*/"",
+      /*defaultImplementation=*/[{ return ::mlir::failure(); }]
+    >,
+    InterfaceMethod<
+      /*desc=*/[{
+        Reify the shape of a single result of an operation (typically in terms
+        of the shape of its operands).
+
+        Returns the shape of a single result of the operation as a
+        `SmallVector<OpFoldResult>`, one per dimension of the shaped type. The
+        given builder may be used to insert ops that compute result shapes.
+
+        If any dimension of the result cannot be computed it must be set to
+        OpFoldResult().
+      }],
+      /*retTy=*/"::llvm::FailureOr<::llvm::SmallVector<::mlir::OpFoldResult>>",
+      /*methodName=*/"reifyShapeOfResult",
+      /*args=*/(ins "::mlir::OpBuilder &":$builder,
+        "int":$resultIndex),
+      /*methodBody=*/"",
+      /*defaultImplementation=*/[{
+        ReifiedRankedShapedTypeDims reifiedShapes;
+        if (failed(cast<ReifyRankedShapedTypeOpInterface>($_op.getOperation()).reifyResultShapes(builder, reifiedShapes)))
+          return failure();
+        if (resultIndex < 0 || resultIndex >= static_cast<int>(reifiedShapes.size()))
+          return $_op.emitOpError("invalid result index");
+        return reifiedShapes[resultIndex];
+      }]
+    >,
+    InterfaceMethod<
+      /*desc=*/[{
+        Reify the shape of a dimension of a given result of an operation
+        (typically in terms of the shape of its operands).
+
+        Returns the shape of a specific dimension of a result of the operation
+        as an OpFoldResult. The given builder may be used to insert ops that
+        compute the shapes.
+
+        If the dimension of the result cannot be computed, the method must
+        return `failure()`.
+      }],
+      /*retTy=*/"::llvm::FailureOr<::mlir::OpFoldResult>",
+      /*methodName=*/"reifyDimOfResult",
+      /*args=*/(ins "::mlir::OpBuilder &":$builder,
+        "int":$resultIndex, "int":$dim),
+      /*methodBody=*/"",
+      /*defaultImplementation=*/[{
+        auto shapes = cast<ReifyRankedShapedTypeOpInterface>($_op.getOperation()).reifyShapeOfResult(builder, resultIndex);
+        if (failed(shapes))
+          return failure();
+        if (dim < 0 || dim >= static_cast<int>((*shapes).size()))
+          return $_op.emitOpError("invalid dimension");
+        return (*shapes)[dim];
+      }]
     >
   ];
 }
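Note: the two new interface methods default to delegating to `reifyResultShapes`, and the matching free functions declared in InferTypeOpInterface.h above give callers a narrower entry point. A short sketch of how a transform might use them; the `b`/`op` names are illustrative, and `op` is assumed to implement `ReifyRankedShapedTypeOpInterface`:

    #include "mlir/IR/Builders.h"
    #include "mlir/Interfaces/InferTypeOpInterface.h"

    using namespace mlir;

    static LogicalResult reifyExample(OpBuilder &b, Operation *op) {
      // All dimensions of result 0, one OpFoldResult per dimension.
      FailureOr<SmallVector<OpFoldResult>> shape =
          reifyShapeOfResult(b, op, /*resultIndex=*/0);
      if (failed(shape))
        return failure();
      // Only dimension 1 of result 0; fails if that dimension cannot be
      // reified in terms of the operands.
      FailureOr<OpFoldResult> dim1 =
          reifyDimOfResult(b, op, /*resultIndex=*/0, /*dim=*/1);
      return success(succeeded(dim1));
    }
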
diff --git a/mlir/include/mlir/Tools/mlir-opt/MlirOptMain.h b/mlir/include/mlir/Tools/mlir-opt/MlirOptMain.h
index b7394387b0f9a..79dfd7a2795f0 100644
--- a/mlir/include/mlir/Tools/mlir-opt/MlirOptMain.h
+++ b/mlir/include/mlir/Tools/mlir-opt/MlirOptMain.h
@@ -359,6 +359,20 @@ class MlirOptMainConfig {
 /// the loaded IR.
 using PassPipelineFn = llvm::function_ref<LogicalResult(PassManager &pm, ModuleOp op)>;
 
+/// Register basic command line options.
+/// - toolName is used for the header displayed by `--help`.
+/// - registry should contain all the dialects that can be parsed in the source.
+/// - return std::string for help header.
+std::string registerCLIOptions(llvm::StringRef toolName,
+                               DialectRegistry &registry);
+
+/// Parse command line options.
+/// - helpHeader is used for the header displayed by `--help`.
+/// - return std::pair<std::string, std::string> for
+///   inputFilename and outputFilename command line option values.
+std::pair<std::string, std::string> parseCLIOptions(int argc, char **argv,
+                                                    llvm::StringRef helpHeader);
+
 /// Register and parse command line options.
 /// - toolName is used for the header displayed by `--help`.
 /// - registry should contain all the dialects that can be parsed in the source.
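Note: splitting the existing register-and-parse helper into `registerCLIOptions` and `parseCLIOptions` lets a tool register its own `llvm::cl` options between the two steps. A sketch of the intended call sequence; the tool name is made up, and the last line leans on the existing `MlirOptMain(argc, argv, inputFilename, outputFilename, registry)` overload declared further down in this header:

    #include "mlir/IR/DialectRegistry.h"
    #include "mlir/Tools/mlir-opt/MlirOptMain.h"

    int main(int argc, char **argv) {
      mlir::DialectRegistry registry;
      // Step 1: register the base options; the result is the --help header.
      std::string helpHeader =
          mlir::registerCLIOptions("standalone-opt", registry);
      // ...a tool may register additional llvm::cl options here...
      // Step 2: parse, recovering the input/output file names.
      auto [inputFilename, outputFilename] =
          mlir::parseCLIOptions(argc, argv, helpHeader);
      return mlir::succeeded(mlir::MlirOptMain(argc, argv, inputFilename,
                                               outputFilename, registry))
                 ? 0
                 : 1;
    }
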
diff --git a/mlir/lib/Bindings/Python/IRCore.cpp b/mlir/lib/Bindings/Python/IRCore.cpp
index cda4fe19c16f8..d90f27bd037e6 100644
--- a/mlir/lib/Bindings/Python/IRCore.cpp
+++ b/mlir/lib/Bindings/Python/IRCore.cpp
@@ -32,33 +32,6 @@ using llvm::SmallVector;
 using llvm::StringRef;
 using llvm::Twine;
 
-//------------------------------------------------------------------------------
-// Docstrings (trivial, non-duplicated docstrings are included inline).
-//------------------------------------------------------------------------------
-
-static const char kContextParseTypeDocstring[] =
-    R"(Parses the assembly form of a type.
-
-Returns a Type object or raises an MLIRError if the type cannot be parsed.
-
-See also: https://mlir.llvm.org/docs/LangRef/#type-system
-)";
-
-static const char kContextGetCallSiteLocationDocstring[] =
-    R"(Gets a Location representing a caller and callsite)";
-
-static const char kContextGetFileLocationDocstring[] =
-    R"(Gets a Location representing a file, line and column)";
-
-static const char kContextGetFileRangeDocstring[] =
-    R"(Gets a Location representing a file, line and column range)";
-
-static const char kContextGetFusedLocationDocstring[] =
-    R"(Gets a Location representing a fused location with optional metadata)";
-
-static const char kContextGetNameLocationDocString[] =
-    R"(Gets a Location representing a named location with optional child location)";
-
 static const char kModuleParseDocstring[] =
     R"(Parses a module's assembly format from a string.
 
@@ -67,132 +40,12 @@ Returns a new MlirModule or raises an MLIRError if the parsing fails.
 See also: https://mlir.llvm.org/docs/LangRef/
 )";
 
-static const char kModuleCAPICreate[] =
-    R"(Creates a Module from a MlirModule wrapped by a capsule (i.e. module._CAPIPtr).
-Note this returns a new object BUT _clear_mlir_module(module) must be called to
-prevent double-frees (of the underlying mlir::Module).
-)";
-
-static const char kOperationCreateDocstring[] =
-    R"(Creates a new operation.
-
-Args:
-  name: Operation name (e.g. "dialect.operation").
-  results: Sequence of Type representing op result types.
-  attributes: Dict of str:Attribute.
-  successors: List of Block for the operation's successors.
-  regions: Number of regions to create.
-  location: A Location object (defaults to resolve from context manager).
-  ip: An InsertionPoint (defaults to resolve from context manager or set to
-    False to disable insertion, even with an insertion point set in the
-    context manager).
-  infer_type: Whether to infer result types.
-Returns:
-  A new "detached" Operation object. Detached operations can be added
-  to blocks, which causes them to become "attached."
-)";
-
-static const char kOperationPrintDocstring[] =
-    R"(Prints the assembly form of the operation to a file like object.
-
-Args:
-  file: The file like object to write to. Defaults to sys.stdout.
-  binary: Whether to write bytes (True) or str (False). Defaults to False.
-  large_elements_limit: Whether to elide elements attributes above this
-    number of elements. Defaults to None (no limit).
-  large_resource_limit: Whether to elide resource attributes above this
-    number of characters.
Defaults to None (no limit). If large_elements_limit - is set and this is None, the behavior will be to use large_elements_limit - as large_resource_limit. - enable_debug_info: Whether to print debug/location information. Defaults - to False. - pretty_debug_info: Whether to format debug information for easier reading - by a human (warning: the result is unparseable). - print_generic_op_form: Whether to print the generic assembly forms of all - ops. Defaults to False. - use_local_Scope: Whether to print in a way that is more optimized for - multi-threaded access but may not be consistent with how the overall - module prints. - assume_verified: By default, if not printing generic form, the verifier - will be run and if it fails, generic form will be printed with a comment - about failed verification. While a reasonable default for interactive use, - for systematic use, it is often better for the caller to verify explicitly - and report failures in a more robust fashion. Set this to True if doing this - in order to avoid running a redundant verification. If the IR is actually - invalid, behavior is undefined. - skip_regions: Whether to skip printing regions. Defaults to False. -)"; - -static const char kOperationPrintStateDocstring[] = - R"(Prints the assembly form of the operation to a file like object. - -Args: - file: The file like object to write to. Defaults to sys.stdout. - binary: Whether to write bytes (True) or str (False). Defaults to False. - state: AsmState capturing the operation numbering and flags. -)"; - -static const char kOperationGetAsmDocstring[] = - R"(Gets the assembly form of the operation with all options available. - -Args: - binary: Whether to return a bytes (True) or str (False) object. Defaults to - False. - ... others ...: See the print() method for common keyword arguments for - configuring the printout. -Returns: - Either a bytes or str object, depending on the setting of the 'binary' - argument. -)"; - -static const char kOperationPrintBytecodeDocstring[] = - R"(Write the bytecode form of the operation to a file like object. - -Args: - file: The file like object to write to. - desired_version: The version of bytecode to emit. -Returns: - The bytecode writer status. -)"; - -static const char kOperationStrDunderDocstring[] = - R"(Gets the assembly form of the operation with default options. - -If more advanced control over the assembly formatting or I/O options is needed, -use the dedicated print or get_asm method, which supports keyword arguments to -customize behavior. -)"; - static const char kDumpDocstring[] = - R"(Dumps a debug representation of the object to stderr.)"; - -static const char kAppendBlockDocstring[] = - R"(Appends a new block, with argument types as positional args. - -Returns: - The created block. -)"; - -static const char kValueDunderStrDocstring[] = - R"(Returns the string form of the value. - -If the value is a block argument, this is the assembly form of its type and the -position in the argument list. If the value is an operation result, this is -equivalent to printing the operation that produced it. -)"; - -static const char kGetNameAsOperand[] = - R"(Returns the string form of value as an operand (i.e., the ValueID). -)"; - -static const char kValueReplaceAllUsesWithDocstring[] = - R"(Replace all uses of value with the new value, updating anything in -the IR that uses 'self' to use the other value instead. 
-)"; + "Dumps a debug representation of the object to stderr."; static const char kValueReplaceAllUsesExceptDocstring[] = - R"("Replace all uses of this value with the 'with' value, except for those -in 'exceptions'. 'exceptions' can be either a single operation or a list of + R"(Replace all uses of this value with the `with` value, except for those +in `exceptions`. `exceptions` can be either a single operation or a list of operations. )"; @@ -274,22 +127,26 @@ struct PyGlobalDebugFlag { // Debug flags. nb::class_(m, "_GlobalDebug") .def_prop_rw_static("flag", &PyGlobalDebugFlag::get, - &PyGlobalDebugFlag::set, "LLVM-wide debug flag") + &PyGlobalDebugFlag::set, "LLVM-wide debug flag.") .def_static( "set_types", [](const std::string &type) { nb::ft_lock_guard lock(mutex); mlirSetGlobalDebugType(type.c_str()); }, - "types"_a, "Sets specific debug types to be produced by LLVM") - .def_static("set_types", [](const std::vector &types) { - std::vector pointers; - pointers.reserve(types.size()); - for (const std::string &str : types) - pointers.push_back(str.c_str()); - nb::ft_lock_guard lock(mutex); - mlirSetGlobalDebugTypes(pointers.data(), pointers.size()); - }); + "types"_a, "Sets specific debug types to be produced by LLVM.") + .def_static( + "set_types", + [](const std::vector &types) { + std::vector pointers; + pointers.reserve(types.size()); + for (const std::string &str : types) + pointers.push_back(str.c_str()); + nb::ft_lock_guard lock(mutex); + mlirSetGlobalDebugTypes(pointers.data(), pointers.size()); + }, + "types"_a, + "Sets multiple specific debug types to be produced by LLVM."); } private: @@ -316,12 +173,18 @@ struct PyAttrBuilderMap { static void bind(nb::module_ &m) { nb::class_(m, "AttrBuilder") - .def_static("contains", &PyAttrBuilderMap::dunderContains) - .def_static("get", &PyAttrBuilderMap::dunderGetItemNamed) + .def_static("contains", &PyAttrBuilderMap::dunderContains, + "attribute_kind"_a, + "Checks whether an attribute builder is registered for the " + "given attribute kind.") + .def_static("get", &PyAttrBuilderMap::dunderGetItemNamed, + "attribute_kind"_a, + "Gets the registered attribute builder for the given " + "attribute kind.") .def_static("insert", &PyAttrBuilderMap::dunderSetItemNamed, "attribute_kind"_a, "attr_builder"_a, "replace"_a = false, "Register an attribute builder for building MLIR " - "attributes from python values."); + "attributes from Python values."); } }; @@ -357,8 +220,10 @@ class PyRegionIterator { static void bind(nb::module_ &m) { nb::class_(m, "RegionIterator") - .def("__iter__", &PyRegionIterator::dunderIter) - .def("__next__", &PyRegionIterator::dunderNext); + .def("__iter__", &PyRegionIterator::dunderIter, + "Returns an iterator over the regions in the operation.") + .def("__next__", &PyRegionIterator::dunderNext, + "Returns the next region in the iteration."); } private: @@ -386,7 +251,8 @@ class PyRegionList : public Sliceable { } static void bindDerived(ClassTy &c) { - c.def("__iter__", &PyRegionList::dunderIter); + c.def("__iter__", &PyRegionList::dunderIter, + "Returns an iterator over the regions in the sequence."); } private: @@ -430,8 +296,10 @@ class PyBlockIterator { static void bind(nb::module_ &m) { nb::class_(m, "BlockIterator") - .def("__iter__", &PyBlockIterator::dunderIter) - .def("__next__", &PyBlockIterator::dunderNext); + .def("__iter__", &PyBlockIterator::dunderIter, + "Returns an iterator over the blocks in the operation's region.") + .def("__next__", &PyBlockIterator::dunderNext, + "Returns the next block in the 
iteration."); } private: @@ -493,10 +361,19 @@ class PyBlockList { static void bind(nb::module_ &m) { nb::class_(m, "BlockList") - .def("__getitem__", &PyBlockList::dunderGetItem) - .def("__iter__", &PyBlockList::dunderIter) - .def("__len__", &PyBlockList::dunderLen) - .def("append", &PyBlockList::appendBlock, kAppendBlockDocstring, + .def("__getitem__", &PyBlockList::dunderGetItem, + "Returns the block at the specified index.") + .def("__iter__", &PyBlockList::dunderIter, + "Returns an iterator over blocks in the operation's region.") + .def("__len__", &PyBlockList::dunderLen, + "Returns the number of blocks in the operation's region.") + .def("append", &PyBlockList::appendBlock, + R"( + Appends a new block, with argument types as positional args. + + Returns: + The created block. + )", nb::arg("args"), nb::kw_only(), nb::arg("arg_locs") = std::nullopt); } @@ -527,8 +404,10 @@ class PyOperationIterator { static void bind(nb::module_ &m) { nb::class_(m, "OperationIterator") - .def("__iter__", &PyOperationIterator::dunderIter) - .def("__next__", &PyOperationIterator::dunderNext); + .def("__iter__", &PyOperationIterator::dunderIter, + "Returns an iterator over the operations in an operation's block.") + .def("__next__", &PyOperationIterator::dunderNext, + "Returns the next operation in the iteration."); } private: @@ -584,9 +463,12 @@ class PyOperationList { static void bind(nb::module_ &m) { nb::class_(m, "OperationList") - .def("__getitem__", &PyOperationList::dunderGetItem) - .def("__iter__", &PyOperationList::dunderIter) - .def("__len__", &PyOperationList::dunderLen); + .def("__getitem__", &PyOperationList::dunderGetItem, + "Returns the operation at the specified index.") + .def("__iter__", &PyOperationList::dunderIter, + "Returns an iterator over operations in the list.") + .def("__len__", &PyOperationList::dunderLen, + "Returns the number of operations in the list."); } private: @@ -609,8 +491,10 @@ class PyOpOperand { static void bind(nb::module_ &m) { nb::class_(m, "OpOperand") - .def_prop_ro("owner", &PyOpOperand::getOwner) - .def_prop_ro("operand_number", &PyOpOperand::getOperandNumber); + .def_prop_ro("owner", &PyOpOperand::getOwner, + "Returns the operation that owns this operand.") + .def_prop_ro("operand_number", &PyOpOperand::getOperandNumber, + "Returns the operand number in the owning operation."); } private: @@ -634,8 +518,10 @@ class PyOpOperandIterator { static void bind(nb::module_ &m) { nb::class_(m, "OpOperandIterator") - .def("__iter__", &PyOpOperandIterator::dunderIter) - .def("__next__", &PyOpOperandIterator::dunderNext); + .def("__iter__", &PyOpOperandIterator::dunderIter, + "Returns an iterator over operands.") + .def("__next__", &PyOpOperandIterator::dunderNext, + "Returns the next operand in the iteration."); } private: @@ -1626,16 +1512,21 @@ class PyOpResult : public PyConcreteValue { static void bindDerived(ClassTy &c) { c.def_prop_ro( - "owner", [](PyOpResult &self) -> nb::typed { + "owner", + [](PyOpResult &self) -> nb::typed { assert(mlirOperationEqual(self.getParentOperation()->get(), mlirOpResultGetOwner(self.get())) && "expected the owner of the value in Python to match that in " "the IR"); return self.getParentOperation().getObject(); - }); - c.def_prop_ro("result_number", [](PyOpResult &self) { - return mlirOpResultGetResultNumber(self.get()); - }); + }, + "Returns the operation that produces this result."); + c.def_prop_ro( + "result_number", + [](PyOpResult &self) { + return mlirOpResultGetResultNumber(self.get()); + }, + "Returns the position of 
this result in the operation's result list."); } }; @@ -1671,13 +1562,18 @@ class PyOpResultList : public Sliceable { operation(std::move(operation)) {} static void bindDerived(ClassTy &c) { - c.def_prop_ro("types", [](PyOpResultList &self) { - return getValueTypes(self, self.operation->getContext()); - }); - c.def_prop_ro("owner", - [](PyOpResultList &self) -> nb::typed { - return self.operation->createOpView(); - }); + c.def_prop_ro( + "types", + [](PyOpResultList &self) { + return getValueTypes(self, self.operation->getContext()); + }, + "Returns a list of types for all results in this result list."); + c.def_prop_ro( + "owner", + [](PyOpResultList &self) -> nb::typed { + return self.operation->createOpView(); + }, + "Returns the operation that owns this result list."); } PyOperationRef &getOperation() { return operation; } @@ -2427,19 +2323,25 @@ class PyBlockArgument : public PyConcreteValue { using PyConcreteValue::PyConcreteValue; static void bindDerived(ClassTy &c) { - c.def_prop_ro("owner", [](PyBlockArgument &self) { - return PyBlock(self.getParentOperation(), - mlirBlockArgumentGetOwner(self.get())); - }); - c.def_prop_ro("arg_number", [](PyBlockArgument &self) { - return mlirBlockArgumentGetArgNumber(self.get()); - }); + c.def_prop_ro( + "owner", + [](PyBlockArgument &self) { + return PyBlock(self.getParentOperation(), + mlirBlockArgumentGetOwner(self.get())); + }, + "Returns the block that owns this argument."); + c.def_prop_ro( + "arg_number", + [](PyBlockArgument &self) { + return mlirBlockArgumentGetArgNumber(self.get()); + }, + "Returns the position of this argument in the block's argument list."); c.def( "set_type", [](PyBlockArgument &self, PyType type) { return mlirBlockArgumentSetType(self.get(), type); }, - nb::arg("type")); + nb::arg("type"), "Sets the type of this block argument."); } }; @@ -2462,9 +2364,12 @@ class PyBlockArgumentList operation(std::move(operation)), block(block) {} static void bindDerived(ClassTy &c) { - c.def_prop_ro("types", [](PyBlockArgumentList &self) { - return getValueTypes(self, self.operation->getContext()); - }); + c.def_prop_ro( + "types", + [](PyBlockArgumentList &self) { + return getValueTypes(self, self.operation->getContext()); + }, + "Returns a list of types for all arguments in this argument list."); } private: @@ -2516,7 +2421,9 @@ class PyOpOperandList : public Sliceable { } static void bindDerived(ClassTy &c) { - c.def("__setitem__", &PyOpOperandList::dunderSetItem); + c.def("__setitem__", &PyOpOperandList::dunderSetItem, nb::arg("index"), + nb::arg("value"), + "Sets the operand at the specified index to a new value."); } private: @@ -2571,7 +2478,8 @@ class PyOpSuccessors : public Sliceable { } static void bindDerived(ClassTy &c) { - c.def("__setitem__", &PyOpSuccessors::dunderSetItem); + c.def("__setitem__", &PyOpSuccessors::dunderSetItem, nb::arg("index"), + nb::arg("block"), "Sets the successor block at the specified index."); } private: @@ -2743,55 +2651,70 @@ class PyOpAttributeMap { static void bind(nb::module_ &m) { nb::class_(m, "OpAttributeMap") - .def("__contains__", &PyOpAttributeMap::dunderContains) - .def("__len__", &PyOpAttributeMap::dunderLen) - .def("__getitem__", &PyOpAttributeMap::dunderGetItemNamed) - .def("__getitem__", &PyOpAttributeMap::dunderGetItemIndexed) - .def("__setitem__", &PyOpAttributeMap::dunderSetItem) - .def("__delitem__", &PyOpAttributeMap::dunderDelItem) - .def("__iter__", - [](PyOpAttributeMap &self) { - nb::list keys; - PyOpAttributeMap::forEachAttr( - self.operation->get(), - 
[&](MlirStringRef name, MlirAttribute) { - keys.append(nb::str(name.data, name.length)); - }); - return nb::iter(keys); - }) - .def("keys", - [](PyOpAttributeMap &self) { - nb::list out; - PyOpAttributeMap::forEachAttr( - self.operation->get(), - [&](MlirStringRef name, MlirAttribute) { - out.append(nb::str(name.data, name.length)); - }); - return out; - }) - .def("values", - [](PyOpAttributeMap &self) { - nb::list out; - PyOpAttributeMap::forEachAttr( - self.operation->get(), - [&](MlirStringRef, MlirAttribute attr) { - out.append(PyAttribute(self.operation->getContext(), attr) - .maybeDownCast()); - }); - return out; - }) - .def("items", [](PyOpAttributeMap &self) { - nb::list out; - PyOpAttributeMap::forEachAttr( - self.operation->get(), - [&](MlirStringRef name, MlirAttribute attr) { - out.append(nb::make_tuple( - nb::str(name.data, name.length), - PyAttribute(self.operation->getContext(), attr) - .maybeDownCast())); - }); - return out; - }); + .def("__contains__", &PyOpAttributeMap::dunderContains, nb::arg("name"), + "Checks if an attribute with the given name exists in the map.") + .def("__len__", &PyOpAttributeMap::dunderLen, + "Returns the number of attributes in the map.") + .def("__getitem__", &PyOpAttributeMap::dunderGetItemNamed, + nb::arg("name"), "Gets an attribute by name.") + .def("__getitem__", &PyOpAttributeMap::dunderGetItemIndexed, + nb::arg("index"), "Gets a named attribute by index.") + .def("__setitem__", &PyOpAttributeMap::dunderSetItem, nb::arg("name"), + nb::arg("attr"), "Sets an attribute with the given name.") + .def("__delitem__", &PyOpAttributeMap::dunderDelItem, nb::arg("name"), + "Deletes an attribute with the given name.") + .def( + "__iter__", + [](PyOpAttributeMap &self) { + nb::list keys; + PyOpAttributeMap::forEachAttr( + self.operation->get(), + [&](MlirStringRef name, MlirAttribute) { + keys.append(nb::str(name.data, name.length)); + }); + return nb::iter(keys); + }, + "Iterates over attribute names.") + .def( + "keys", + [](PyOpAttributeMap &self) { + nb::list out; + PyOpAttributeMap::forEachAttr( + self.operation->get(), + [&](MlirStringRef name, MlirAttribute) { + out.append(nb::str(name.data, name.length)); + }); + return out; + }, + "Returns a list of attribute names.") + .def( + "values", + [](PyOpAttributeMap &self) { + nb::list out; + PyOpAttributeMap::forEachAttr( + self.operation->get(), + [&](MlirStringRef, MlirAttribute attr) { + out.append(PyAttribute(self.operation->getContext(), attr) + .maybeDownCast()); + }); + return out; + }, + "Returns a list of attribute values.") + .def( + "items", + [](PyOpAttributeMap &self) { + nb::list out; + PyOpAttributeMap::forEachAttr( + self.operation->get(), + [&](MlirStringRef name, MlirAttribute attr) { + out.append(nb::make_tuple( + nb::str(name.data, name.length), + PyAttribute(self.operation->getContext(), attr) + .maybeDownCast())); + }); + return out; + }, + "Returns a list of `(name, attribute)` tuples."); } private: @@ -2979,62 +2902,103 @@ void mlir::python::populateIRCore(nb::module_ &m) { // Mapping of Diagnostics. 
//---------------------------------------------------------------------------- nb::class_(m, "Diagnostic") - .def_prop_ro("severity", &PyDiagnostic::getSeverity) - .def_prop_ro("location", &PyDiagnostic::getLocation) - .def_prop_ro("message", &PyDiagnostic::getMessage) - .def_prop_ro("notes", &PyDiagnostic::getNotes) - .def("__str__", [](PyDiagnostic &self) -> nb::str { - if (!self.isValid()) - return nb::str(""); - return self.getMessage(); - }); + .def_prop_ro("severity", &PyDiagnostic::getSeverity, + "Returns the severity of the diagnostic.") + .def_prop_ro("location", &PyDiagnostic::getLocation, + "Returns the location associated with the diagnostic.") + .def_prop_ro("message", &PyDiagnostic::getMessage, + "Returns the message text of the diagnostic.") + .def_prop_ro("notes", &PyDiagnostic::getNotes, + "Returns a tuple of attached note diagnostics.") + .def( + "__str__", + [](PyDiagnostic &self) -> nb::str { + if (!self.isValid()) + return nb::str(""); + return self.getMessage(); + }, + "Returns the diagnostic message as a string."); nb::class_(m, "DiagnosticInfo") - .def("__init__", - [](PyDiagnostic::DiagnosticInfo &self, PyDiagnostic diag) { - new (&self) PyDiagnostic::DiagnosticInfo(diag.getInfo()); - }) - .def_ro("severity", &PyDiagnostic::DiagnosticInfo::severity) - .def_ro("location", &PyDiagnostic::DiagnosticInfo::location) - .def_ro("message", &PyDiagnostic::DiagnosticInfo::message) - .def_ro("notes", &PyDiagnostic::DiagnosticInfo::notes) - .def("__str__", - [](PyDiagnostic::DiagnosticInfo &self) { return self.message; }); + .def( + "__init__", + [](PyDiagnostic::DiagnosticInfo &self, PyDiagnostic diag) { + new (&self) PyDiagnostic::DiagnosticInfo(diag.getInfo()); + }, + "diag"_a, "Creates a DiagnosticInfo from a Diagnostic.") + .def_ro("severity", &PyDiagnostic::DiagnosticInfo::severity, + "The severity level of the diagnostic.") + .def_ro("location", &PyDiagnostic::DiagnosticInfo::location, + "The location associated with the diagnostic.") + .def_ro("message", &PyDiagnostic::DiagnosticInfo::message, + "The message text of the diagnostic.") + .def_ro("notes", &PyDiagnostic::DiagnosticInfo::notes, + "List of attached note diagnostics.") + .def( + "__str__", + [](PyDiagnostic::DiagnosticInfo &self) { return self.message; }, + "Returns the diagnostic message as a string."); nb::class_(m, "DiagnosticHandler") - .def("detach", &PyDiagnosticHandler::detach) - .def_prop_ro("attached", &PyDiagnosticHandler::isAttached) - .def_prop_ro("had_error", &PyDiagnosticHandler::getHadError) - .def("__enter__", &PyDiagnosticHandler::contextEnter) + .def("detach", &PyDiagnosticHandler::detach, + "Detaches the diagnostic handler from the context.") + .def_prop_ro("attached", &PyDiagnosticHandler::isAttached, + "Returns True if the handler is attached to a context.") + .def_prop_ro("had_error", &PyDiagnosticHandler::getHadError, + "Returns True if an error was encountered during diagnostic " + "handling.") + .def("__enter__", &PyDiagnosticHandler::contextEnter, + "Enters the diagnostic handler as a context manager.") .def("__exit__", &PyDiagnosticHandler::contextExit, nb::arg("exc_type").none(), nb::arg("exc_value").none(), - nb::arg("traceback").none()); + nb::arg("traceback").none(), + "Exits the diagnostic handler context manager."); // Expose DefaultThreadPool to python nb::class_(m, "ThreadPool") - .def("__init__", [](PyThreadPool &self) { new (&self) PyThreadPool(); }) - .def("get_max_concurrency", &PyThreadPool::getMaxConcurrency) - .def("_mlir_thread_pool_ptr", 
&PyThreadPool::_mlir_thread_pool_ptr); + .def( + "__init__", [](PyThreadPool &self) { new (&self) PyThreadPool(); }, + "Creates a new thread pool with default concurrency.") + .def("get_max_concurrency", &PyThreadPool::getMaxConcurrency, + "Returns the maximum number of threads in the pool.") + .def("_mlir_thread_pool_ptr", &PyThreadPool::_mlir_thread_pool_ptr, + "Returns the raw pointer to the LLVM thread pool as a string."); nb::class_(m, "Context") - .def("__init__", - [](PyMlirContext &self) { - MlirContext context = mlirContextCreateWithThreading(false); - new (&self) PyMlirContext(context); - }) - .def_static("_get_live_count", &PyMlirContext::getLiveCount) - .def("_get_context_again", - [](PyMlirContext &self) -> nb::typed { - PyMlirContextRef ref = PyMlirContext::forContext(self.get()); - return ref.releaseObject(); - }) - .def("_get_live_module_count", &PyMlirContext::getLiveModuleCount) - .def_prop_ro(MLIR_PYTHON_CAPI_PTR_ATTR, &PyMlirContext::getCapsule) + .def( + "__init__", + [](PyMlirContext &self) { + MlirContext context = mlirContextCreateWithThreading(false); + new (&self) PyMlirContext(context); + }, + R"( + Creates a new MLIR context. + + The context is the top-level container for all MLIR objects. It owns the storage + for types, attributes, locations, and other core IR objects. A context can be + configured to allow or disallow unregistered dialects and can have dialects + loaded on-demand.)") + .def_static("_get_live_count", &PyMlirContext::getLiveCount, + "Gets the number of live Context objects.") + .def( + "_get_context_again", + [](PyMlirContext &self) -> nb::typed { + PyMlirContextRef ref = PyMlirContext::forContext(self.get()); + return ref.releaseObject(); + }, + "Gets another reference to the same context.") + .def("_get_live_module_count", &PyMlirContext::getLiveModuleCount, + "Gets the number of live modules owned by this context.") + .def_prop_ro(MLIR_PYTHON_CAPI_PTR_ATTR, &PyMlirContext::getCapsule, + "Gets a capsule wrapping the MlirContext.") .def_static(MLIR_PYTHON_CAPI_FACTORY_ATTR, - &PyMlirContext::createFromCapsule) - .def("__enter__", &PyMlirContext::contextEnter) + &PyMlirContext::createFromCapsule, + "Creates a Context from a capsule wrapping MlirContext.") + .def("__enter__", &PyMlirContext::contextEnter, + "Enters the context as a context manager.") .def("__exit__", &PyMlirContext::contextExit, nb::arg("exc_type").none(), - nb::arg("exc_value").none(), nb::arg("traceback").none()) + nb::arg("exc_value").none(), nb::arg("traceback").none(), + "Exits the context manager.") .def_prop_ro_static( "current", [](nb::object & /*class*/) @@ -3045,14 +3009,15 @@ void mlir::python::populateIRCore(nb::module_ &m) { return nb::cast(context); }, nb::sig("def current(/) -> Context | None"), - "Gets the Context bound to the current thread or raises ValueError") + "Gets the Context bound to the current thread or returns None if no " + "context is set.") .def_prop_ro( "dialects", [](PyMlirContext &self) { return PyDialects(self.getRef()); }, - "Gets a container for accessing dialects by name") + "Gets a container for accessing dialects by name.") .def_prop_ro( "d", [](PyMlirContext &self) { return PyDialects(self.getRef()); }, - "Alias for 'dialect'") + "Alias for `dialects`.") .def( "get_dialect_descriptor", [=](PyMlirContext &self, std::string &name) { @@ -3065,7 +3030,7 @@ void mlir::python::populateIRCore(nb::module_ &m) { return PyDialectDescriptor(self.getRef(), dialect); }, nb::arg("dialect_name"), - "Gets or loads a dialect by name, returning its 
descriptor object") + "Gets or loads a dialect by name, returning its descriptor object.") .def_prop_rw( "allow_unregistered_dialects", [](PyMlirContext &self) -> bool { @@ -3073,67 +3038,110 @@ void mlir::python::populateIRCore(nb::module_ &m) { }, [](PyMlirContext &self, bool value) { mlirContextSetAllowUnregisteredDialects(self.get(), value); - }) + }, + "Controls whether unregistered dialects are allowed in this context.") .def("attach_diagnostic_handler", &PyMlirContext::attachDiagnosticHandler, nb::arg("callback"), - "Attaches a diagnostic handler that will receive callbacks") + "Attaches a diagnostic handler that will receive callbacks.") .def( "enable_multithreading", [](PyMlirContext &self, bool enable) { mlirContextEnableMultithreading(self.get(), enable); }, - nb::arg("enable")) - .def("set_thread_pool", - [](PyMlirContext &self, PyThreadPool &pool) { - // we should disable multi-threading first before setting - // new thread pool otherwise the assert in - // MLIRContext::setThreadPool will be raised. - mlirContextEnableMultithreading(self.get(), false); - mlirContextSetThreadPool(self.get(), pool.get()); - }) - .def("get_num_threads", - [](PyMlirContext &self) { - return mlirContextGetNumThreads(self.get()); - }) - .def("_mlir_thread_pool_ptr", - [](PyMlirContext &self) { - MlirLlvmThreadPool pool = mlirContextGetThreadPool(self.get()); - std::stringstream ss; - ss << pool.ptr; - return ss.str(); - }) + nb::arg("enable"), + R"( + Enables or disables multi-threading support in the context. + + Args: + enable: Whether to enable (True) or disable (False) multi-threading. + )") + .def( + "set_thread_pool", + [](PyMlirContext &self, PyThreadPool &pool) { + // we should disable multi-threading first before setting + // new thread pool otherwise the assert in + // MLIRContext::setThreadPool will be raised. + mlirContextEnableMultithreading(self.get(), false); + mlirContextSetThreadPool(self.get(), pool.get()); + }, + R"( + Sets a custom thread pool for the context to use. + + Args: + pool: A ThreadPool object to use for parallel operations. + + Note: + Multi-threading is automatically disabled before setting the thread pool.)") + .def( + "get_num_threads", + [](PyMlirContext &self) { + return mlirContextGetNumThreads(self.get()); + }, + "Gets the number of threads in the context's thread pool.") + .def( + "_mlir_thread_pool_ptr", + [](PyMlirContext &self) { + MlirLlvmThreadPool pool = mlirContextGetThreadPool(self.get()); + std::stringstream ss; + ss << pool.ptr; + return ss.str(); + }, + "Gets the raw pointer to the LLVM thread pool as a string.") .def( "is_registered_operation", [](PyMlirContext &self, std::string &name) { return mlirContextIsRegisteredOperation( self.get(), MlirStringRef{name.data(), name.size()}); }, - nb::arg("operation_name")) + nb::arg("operation_name"), + R"( + Checks whether an operation with the given name is registered. + + Args: + operation_name: The fully qualified name of the operation (e.g., `arith.addf`). + + Returns: + True if the operation is registered, False otherwise.)") .def( "append_dialect_registry", [](PyMlirContext &self, PyDialectRegistry ®istry) { mlirContextAppendDialectRegistry(self.get(), registry); }, - nb::arg("registry")) + nb::arg("registry"), + R"( + Appends the contents of a dialect registry to the context. 
+ + Args: + registry: A DialectRegistry containing dialects to append.)") .def_prop_rw("emit_error_diagnostics", &PyMlirContext::getEmitErrorDiagnostics, &PyMlirContext::setEmitErrorDiagnostics, - "Emit error diagnostics to diagnostic handlers. By default " - "error diagnostics are captured and reported through " - "MLIRError exceptions.") - .def("load_all_available_dialects", [](PyMlirContext &self) { - mlirContextLoadAllAvailableDialects(self.get()); - }); + R"( + Controls whether error diagnostics are emitted to diagnostic handlers. + + By default, error diagnostics are captured and reported through MLIRError exceptions.)") + .def( + "load_all_available_dialects", + [](PyMlirContext &self) { + mlirContextLoadAllAvailableDialects(self.get()); + }, + R"( + Loads all dialects available in the registry into the context. + + This eagerly loads all dialects that have been registered, making them + immediately available for use.)"); //---------------------------------------------------------------------------- // Mapping of PyDialectDescriptor //---------------------------------------------------------------------------- nb::class_(m, "DialectDescriptor") - .def_prop_ro("namespace", - [](PyDialectDescriptor &self) { - MlirStringRef ns = mlirDialectGetNamespace(self.get()); - return nb::str(ns.data, ns.length); - }) + .def_prop_ro( + "namespace", + [](PyDialectDescriptor &self) { + MlirStringRef ns = mlirDialectGetNamespace(self.get()); + return nb::str(ns.data, ns.length); + }, + "Returns the namespace of the dialect.") .def( "__repr__", [](PyDialectDescriptor &self) { @@ -3143,35 +3151,43 @@ void mlir::python::populateIRCore(nb::module_ &m) { repr.append(">"); return repr; }, - nb::sig("def __repr__(self) -> str")); + nb::sig("def __repr__(self) -> str"), + "Returns a string representation of the dialect descriptor."); //---------------------------------------------------------------------------- // Mapping of PyDialects //---------------------------------------------------------------------------- nb::class_(m, "Dialects") - .def("__getitem__", - [=](PyDialects &self, std::string keyName) { - MlirDialect dialect = - self.getDialectForKey(keyName, /*attrError=*/false); - nb::object descriptor = - nb::cast(PyDialectDescriptor{self.getContext(), dialect}); - return createCustomDialectWrapper(keyName, std::move(descriptor)); - }) - .def("__getattr__", [=](PyDialects &self, std::string attrName) { - MlirDialect dialect = - self.getDialectForKey(attrName, /*attrError=*/true); - nb::object descriptor = - nb::cast(PyDialectDescriptor{self.getContext(), dialect}); - return createCustomDialectWrapper(attrName, std::move(descriptor)); - }); + .def( + "__getitem__", + [=](PyDialects &self, std::string keyName) { + MlirDialect dialect = + self.getDialectForKey(keyName, /*attrError=*/false); + nb::object descriptor = + nb::cast(PyDialectDescriptor{self.getContext(), dialect}); + return createCustomDialectWrapper(keyName, std::move(descriptor)); + }, + "Gets a dialect by name using subscript notation.") + .def( + "__getattr__", + [=](PyDialects &self, std::string attrName) { + MlirDialect dialect = + self.getDialectForKey(attrName, /*attrError=*/true); + nb::object descriptor = + nb::cast(PyDialectDescriptor{self.getContext(), dialect}); + return createCustomDialectWrapper(attrName, std::move(descriptor)); + }, + "Gets a dialect by name using attribute notation."); //---------------------------------------------------------------------------- // Mapping of PyDialect 
//---------------------------------------------------------------------------- nb::class_(m, "Dialect") - .def(nb::init(), nb::arg("descriptor")) - .def_prop_ro("descriptor", - [](PyDialect &self) { return self.getDescriptor(); }) + .def(nb::init(), nb::arg("descriptor"), + "Creates a Dialect from a DialectDescriptor.") + .def_prop_ro( + "descriptor", [](PyDialect &self) { return self.getDescriptor(); }, + "Returns the DialectDescriptor for this dialect.") .def( "__repr__", [](const nb::object &self) { @@ -3181,31 +3197,43 @@ void mlir::python::populateIRCore(nb::module_ &m) { nb::str(" (class ") + clazz.attr("__module__") + nb::str(".") + clazz.attr("__name__") + nb::str(")>"); }, - nb::sig("def __repr__(self) -> str")); + nb::sig("def __repr__(self) -> str"), + "Returns a string representation of the dialect."); //---------------------------------------------------------------------------- // Mapping of PyDialectRegistry //---------------------------------------------------------------------------- nb::class_(m, "DialectRegistry") - .def_prop_ro(MLIR_PYTHON_CAPI_PTR_ATTR, &PyDialectRegistry::getCapsule) + .def_prop_ro(MLIR_PYTHON_CAPI_PTR_ATTR, &PyDialectRegistry::getCapsule, + "Gets a capsule wrapping the MlirDialectRegistry.") .def_static(MLIR_PYTHON_CAPI_FACTORY_ATTR, - &PyDialectRegistry::createFromCapsule) - .def(nb::init<>()); + &PyDialectRegistry::createFromCapsule, + "Creates a DialectRegistry from a capsule wrapping " + "`MlirDialectRegistry`.") + .def(nb::init<>(), "Creates a new empty dialect registry."); //---------------------------------------------------------------------------- // Mapping of Location //---------------------------------------------------------------------------- nb::class_(m, "Location") - .def_prop_ro(MLIR_PYTHON_CAPI_PTR_ATTR, &PyLocation::getCapsule) - .def_static(MLIR_PYTHON_CAPI_FACTORY_ATTR, &PyLocation::createFromCapsule) - .def("__enter__", &PyLocation::contextEnter) + .def_prop_ro(MLIR_PYTHON_CAPI_PTR_ATTR, &PyLocation::getCapsule, + "Gets a capsule wrapping the MlirLocation.") + .def_static(MLIR_PYTHON_CAPI_FACTORY_ATTR, &PyLocation::createFromCapsule, + "Creates a Location from a capsule wrapping MlirLocation.") + .def("__enter__", &PyLocation::contextEnter, + "Enters the location as a context manager.") .def("__exit__", &PyLocation::contextExit, nb::arg("exc_type").none(), - nb::arg("exc_value").none(), nb::arg("traceback").none()) - .def("__eq__", - [](PyLocation &self, PyLocation &other) -> bool { - return mlirLocationEqual(self, other); - }) - .def("__eq__", [](PyLocation &self, nb::object other) { return false; }) + nb::arg("exc_value").none(), nb::arg("traceback").none(), + "Exits the location context manager.") + .def( + "__eq__", + [](PyLocation &self, PyLocation &other) -> bool { + return mlirLocationEqual(self, other); + }, + "Compares two locations for equality.") + .def( + "__eq__", [](PyLocation &self, nb::object other) { return false; }, + "Compares location with non-location object (always returns False).") .def_prop_ro_static( "current", [](nb::object & /*class*/) -> std::optional { @@ -3217,7 +3245,7 @@ void mlir::python::populateIRCore(nb::module_ &m) { // clang-format off nb::sig("def current(/) -> Location | None"), // clang-format on - "Gets the Location bound to the current thread or raises ValueError") + "Gets the Location bound to the current thread or raises ValueError.") .def_static( "unknown", [](DefaultingPyMlirContext context) { @@ -3225,13 +3253,13 @@ void mlir::python::populateIRCore(nb::module_ &m) { 
mlirLocationUnknownGet(context->get())); }, nb::arg("context") = nb::none(), - "Gets a Location representing an unknown location") + "Gets a Location representing an unknown location.") .def_static( "callsite", [](PyLocation callee, const std::vector &frames, DefaultingPyMlirContext context) { if (frames.empty()) - throw nb::value_error("No caller frames provided"); + throw nb::value_error("No caller frames provided."); MlirLocation caller = frames.back().get(); for (const PyLocation &frame : llvm::reverse(llvm::ArrayRef(frames).drop_back())) @@ -3240,18 +3268,23 @@ void mlir::python::populateIRCore(nb::module_ &m) { mlirLocationCallSiteGet(callee.get(), caller)); }, nb::arg("callee"), nb::arg("frames"), nb::arg("context") = nb::none(), - kContextGetCallSiteLocationDocstring) - .def("is_a_callsite", mlirLocationIsACallSite) - .def_prop_ro("callee", - [](PyLocation &self) { - return PyLocation(self.getContext(), - mlirLocationCallSiteGetCallee(self)); - }) - .def_prop_ro("caller", - [](PyLocation &self) { - return PyLocation(self.getContext(), - mlirLocationCallSiteGetCaller(self)); - }) + "Gets a Location representing a caller and callsite.") + .def("is_a_callsite", mlirLocationIsACallSite, + "Returns True if this location is a CallSiteLoc.") + .def_prop_ro( + "callee", + [](PyLocation &self) { + return PyLocation(self.getContext(), + mlirLocationCallSiteGetCallee(self)); + }, + "Gets the callee location from a CallSiteLoc.") + .def_prop_ro( + "caller", + [](PyLocation &self) { + return PyLocation(self.getContext(), + mlirLocationCallSiteGetCaller(self)); + }, + "Gets the caller location from a CallSiteLoc.") .def_static( "file", [](std::string filename, int line, int col, @@ -3262,7 +3295,8 @@ void mlir::python::populateIRCore(nb::module_ &m) { context->get(), toMlirStringRef(filename), line, col)); }, nb::arg("filename"), nb::arg("line"), nb::arg("col"), - nb::arg("context") = nb::none(), kContextGetFileLocationDocstring) + nb::arg("context") = nb::none(), + "Gets a Location representing a file, line and column.") .def_static( "file", [](std::string filename, int startLine, int startCol, int endLine, @@ -3274,17 +3308,25 @@ void mlir::python::populateIRCore(nb::module_ &m) { }, nb::arg("filename"), nb::arg("start_line"), nb::arg("start_col"), nb::arg("end_line"), nb::arg("end_col"), - nb::arg("context") = nb::none(), kContextGetFileRangeDocstring) - .def("is_a_file", mlirLocationIsAFileLineColRange) - .def_prop_ro("filename", - [](MlirLocation loc) { - return mlirIdentifierStr( - mlirLocationFileLineColRangeGetFilename(loc)); - }) - .def_prop_ro("start_line", mlirLocationFileLineColRangeGetStartLine) - .def_prop_ro("start_col", mlirLocationFileLineColRangeGetStartColumn) - .def_prop_ro("end_line", mlirLocationFileLineColRangeGetEndLine) - .def_prop_ro("end_col", mlirLocationFileLineColRangeGetEndColumn) + nb::arg("context") = nb::none(), + "Gets a Location representing a file, line and column range.") + .def("is_a_file", mlirLocationIsAFileLineColRange, + "Returns True if this location is a FileLineColLoc.") + .def_prop_ro( + "filename", + [](MlirLocation loc) { + return mlirIdentifierStr( + mlirLocationFileLineColRangeGetFilename(loc)); + }, + "Gets the filename from a FileLineColLoc.") + .def_prop_ro("start_line", mlirLocationFileLineColRangeGetStartLine, + "Gets the start line number from a `FileLineColLoc`.") + .def_prop_ro("start_col", mlirLocationFileLineColRangeGetStartColumn, + "Gets the start column number from a `FileLineColLoc`.") + .def_prop_ro("end_line", 
mlirLocationFileLineColRangeGetEndLine, + "Gets the end line number from a `FileLineColLoc`.") + .def_prop_ro("end_col", mlirLocationFileLineColRangeGetEndColumn, + "Gets the end column number from a `FileLineColLoc`.") .def_static( "fused", [](const std::vector &pyLocations, @@ -3300,8 +3342,11 @@ void mlir::python::populateIRCore(nb::module_ &m) { return PyLocation(context->getRef(), location); }, nb::arg("locations"), nb::arg("metadata") = nb::none(), - nb::arg("context") = nb::none(), kContextGetFusedLocationDocstring) - .def("is_a_fused", mlirLocationIsAFused) + nb::arg("context") = nb::none(), + "Gets a Location representing a fused location with optional " + "metadata.") + .def("is_a_fused", mlirLocationIsAFused, + "Returns True if this location is a `FusedLoc`.") .def_prop_ro( "locations", [](PyLocation &self) { @@ -3314,7 +3359,8 @@ void mlir::python::populateIRCore(nb::module_ &m) { for (unsigned i = 0; i < numLocations; ++i) pyLocations.emplace_back(self.getContext(), locations[i]); return pyLocations; - }) + }, + "Gets the list of locations from a `FusedLoc`.") .def_static( "name", [](std::string name, std::optional childLoc, @@ -3327,17 +3373,24 @@ void mlir::python::populateIRCore(nb::module_ &m) { : mlirLocationUnknownGet(context->get()))); }, nb::arg("name"), nb::arg("childLoc") = nb::none(), - nb::arg("context") = nb::none(), kContextGetNameLocationDocString) - .def("is_a_name", mlirLocationIsAName) - .def_prop_ro("name_str", - [](MlirLocation loc) { - return mlirIdentifierStr(mlirLocationNameGetName(loc)); - }) - .def_prop_ro("child_loc", - [](PyLocation &self) { - return PyLocation(self.getContext(), - mlirLocationNameGetChildLoc(self)); - }) + nb::arg("context") = nb::none(), + "Gets a Location representing a named location with optional child " + "location.") + .def("is_a_name", mlirLocationIsAName, + "Returns True if this location is a `NameLoc`.") + .def_prop_ro( + "name_str", + [](MlirLocation loc) { + return mlirIdentifierStr(mlirLocationNameGetName(loc)); + }, + "Gets the name string from a `NameLoc`.") + .def_prop_ro( + "child_loc", + [](PyLocation &self) { + return PyLocation(self.getContext(), + mlirLocationNameGetChildLoc(self)); + }, + "Gets the child location from a `NameLoc`.") .def_static( "from_attr", [](PyAttribute &attribute, DefaultingPyMlirContext context) { @@ -3345,41 +3398,59 @@ void mlir::python::populateIRCore(nb::module_ &m) { mlirLocationFromAttribute(attribute)); }, nb::arg("attribute"), nb::arg("context") = nb::none(), - "Gets a Location from a LocationAttr") + "Gets a Location from a `LocationAttr`.") .def_prop_ro( "context", [](PyLocation &self) -> nb::typed { return self.getContext().getObject(); }, - "Context that owns the Location") + "Context that owns the `Location`.") .def_prop_ro( "attr", [](PyLocation &self) { return PyAttribute(self.getContext(), mlirLocationGetAttribute(self)); }, - "Get the underlying LocationAttr") + "Get the underlying `LocationAttr`.") .def( "emit_error", [](PyLocation &self, std::string message) { mlirEmitError(self, message.c_str()); }, - nb::arg("message"), "Emits an error at this location") - .def("__repr__", [](PyLocation &self) { - PyPrintAccumulator printAccum; - mlirLocationPrint(self, printAccum.getCallback(), - printAccum.getUserData()); - return printAccum.join(); - }); + nb::arg("message"), + R"( + Emits an error diagnostic at this location. 
+ + Args: + message: The error message to emit.)") + .def( + "__repr__", + [](PyLocation &self) { + PyPrintAccumulator printAccum; + mlirLocationPrint(self, printAccum.getCallback(), + printAccum.getUserData()); + return printAccum.join(); + }, + "Returns the assembly representation of the location."); //---------------------------------------------------------------------------- // Mapping of Module //---------------------------------------------------------------------------- nb::class_(m, "Module", nb::is_weak_referenceable()) - .def_prop_ro(MLIR_PYTHON_CAPI_PTR_ATTR, &PyModule::getCapsule) + .def_prop_ro(MLIR_PYTHON_CAPI_PTR_ATTR, &PyModule::getCapsule, + "Gets a capsule wrapping the MlirModule.") .def_static(MLIR_PYTHON_CAPI_FACTORY_ATTR, &PyModule::createFromCapsule, - kModuleCAPICreate) - .def("_clear_mlir_module", &PyModule::clearMlirModule) + R"( + Creates a Module from a `MlirModule` wrapped by a capsule (i.e. `module._CAPIPtr`). + + This returns a new object **BUT** `_clear_mlir_module(module)` must be called to + prevent double-frees (of the underlying `mlir::Module`).)") + .def("_clear_mlir_module", &PyModule::clearMlirModule, + R"( + Clears the internal MLIR module reference. + + This is used internally to prevent double-free when ownership is transferred + via the C API capsule mechanism. Not intended for normal use.)") .def_static( "parse", [](const std::string &moduleAsm, DefaultingPyMlirContext context) @@ -3427,13 +3498,13 @@ void mlir::python::populateIRCore(nb::module_ &m) { MlirModule module = mlirModuleCreateEmpty(pyLoc.get()); return PyModule::forModule(module).releaseObject(); }, - nb::arg("loc") = nb::none(), "Creates an empty module") + nb::arg("loc") = nb::none(), "Creates an empty module.") .def_prop_ro( "context", [](PyModule &self) -> nb::typed { return self.getContext().getObject(); }, - "Context that created the Module") + "Context that created the `Module`.") .def_prop_ro( "operation", [](PyModule &self) -> nb::typed { @@ -3442,7 +3513,7 @@ void mlir::python::populateIRCore(nb::module_ &m) { self.getRef().releaseObject()) .releaseObject(); }, - "Accesses the module as an operation") + "Accesses the module as an operation.") .def_prop_ro( "body", [](PyModule &self) { @@ -3452,7 +3523,7 @@ void mlir::python::populateIRCore(nb::module_ &m) { PyBlock returnBlock(moduleOp, mlirModuleGetBody(self.get())); return returnBlock; }, - "Return the block for this module") + "Return the block for this module.") .def( "dump", [](PyModule &self) { @@ -3465,39 +3536,59 @@ void mlir::python::populateIRCore(nb::module_ &m) { // Defer to the operation's __str__. return self.attr("operation").attr("__str__")(); }, - nb::sig("def __str__(self) -> str"), kOperationStrDunderDocstring) + nb::sig("def __str__(self) -> str"), + R"( + Gets the assembly form of the operation with default options. + + If more advanced control over the assembly formatting or I/O options is needed, + use the dedicated print or get_asm method, which supports keyword arguments to + customize behavior. + )") .def( "__eq__", [](PyModule &self, PyModule &other) { return mlirModuleEqual(self.get(), other.get()); }, - "other"_a) - .def("__hash__", - [](PyModule &self) { return mlirModuleHashValue(self.get()); }); + "other"_a, "Compares two modules for equality.") + .def( + "__hash__", + [](PyModule &self) { return mlirModuleHashValue(self.get()); }, + "Returns the hash value of the module."); //---------------------------------------------------------------------------- // Mapping of Operation. 
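A short usage sketch of the `Location` and `Module` bindings documented above, assuming the upstream `mlir` package (the file name is hypothetical):

from mlir.ir import Context, Location, Module

with Context():
    file_loc = Location.file("example.mlir", line=1, col=5)
    named = Location.name("my_value", childLoc=file_loc)  # NameLoc wrapping a FileLineColLoc
    with Location.unknown():
        m = Module.create()      # empty module at the location from the context manager
    parsed = Module.parse("module {}")
    print(parsed)                # __str__ defers to the module operation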
//---------------------------------------------------------------------------- nb::class_(m, "_OperationBase") - .def_prop_ro(MLIR_PYTHON_CAPI_PTR_ATTR, - [](PyOperationBase &self) { - return self.getOperation().getCapsule(); - }) - .def("__eq__", - [](PyOperationBase &self, PyOperationBase &other) { - return mlirOperationEqual(self.getOperation().get(), - other.getOperation().get()); - }) - .def("__eq__", - [](PyOperationBase &self, nb::object other) { return false; }) - .def("__hash__", - [](PyOperationBase &self) { - return mlirOperationHashValue(self.getOperation().get()); - }) - .def_prop_ro("attributes", - [](PyOperationBase &self) { - return PyOpAttributeMap(self.getOperation().getRef()); - }) + .def_prop_ro( + MLIR_PYTHON_CAPI_PTR_ATTR, + [](PyOperationBase &self) { + return self.getOperation().getCapsule(); + }, + "Gets a capsule wrapping the `MlirOperation`.") + .def( + "__eq__", + [](PyOperationBase &self, PyOperationBase &other) { + return mlirOperationEqual(self.getOperation().get(), + other.getOperation().get()); + }, + "Compares two operations for equality.") + .def( + "__eq__", + [](PyOperationBase &self, nb::object other) { return false; }, + "Compares operation with non-operation object (always returns " + "False).") + .def( + "__hash__", + [](PyOperationBase &self) { + return mlirOperationHashValue(self.getOperation().get()); + }, + "Returns the hash value of the operation.") + .def_prop_ro( + "attributes", + [](PyOperationBase &self) { + return PyOpAttributeMap(self.getOperation().getRef()); + }, + "Returns a dictionary-like map of operation attributes.") .def_prop_ro( "context", [](PyOperationBase &self) -> nb::typed { @@ -3505,22 +3596,28 @@ void mlir::python::populateIRCore(nb::module_ &m) { concreteOperation.checkValid(); return concreteOperation.getContext().getObject(); }, - "Context that owns the Operation") - .def_prop_ro("name", - [](PyOperationBase &self) { - auto &concreteOperation = self.getOperation(); - concreteOperation.checkValid(); - MlirOperation operation = concreteOperation.get(); - return mlirIdentifierStr(mlirOperationGetName(operation)); - }) - .def_prop_ro("operands", - [](PyOperationBase &self) { - return PyOpOperandList(self.getOperation().getRef()); - }) - .def_prop_ro("regions", - [](PyOperationBase &self) { - return PyRegionList(self.getOperation().getRef()); - }) + "Context that owns the operation.") + .def_prop_ro( + "name", + [](PyOperationBase &self) { + auto &concreteOperation = self.getOperation(); + concreteOperation.checkValid(); + MlirOperation operation = concreteOperation.get(); + return mlirIdentifierStr(mlirOperationGetName(operation)); + }, + "Returns the fully qualified name of the operation.") + .def_prop_ro( + "operands", + [](PyOperationBase &self) { + return PyOpOperandList(self.getOperation().getRef()); + }, + "Returns the list of operation operands.") + .def_prop_ro( + "regions", + [](PyOperationBase &self) { + return PyRegionList(self.getOperation().getRef()); + }, + "Returns the list of operation regions.") .def_prop_ro( "results", [](PyOperationBase &self) { @@ -3551,14 +3648,16 @@ void mlir::python::populateIRCore(nb::module_ &m) { "defined or derived from."), nb::for_setter("Sets the source location the operation was defined " "or derived from.")) - .def_prop_ro("parent", - [](PyOperationBase &self) - -> std::optional> { - auto parent = self.getOperation().getParentOperation(); - if (parent) - return parent->getObject(); - return {}; - }) + .def_prop_ro( + "parent", + [](PyOperationBase &self) + -> std::optional> { 
+ auto parent = self.getOperation().getParentOperation(); + if (parent) + return parent->getObject(); + return {}; + }, + "Returns the parent operation, or `None` if at top level.") .def( "__str__", [](PyOperationBase &self) { @@ -3579,7 +3678,14 @@ void mlir::python::populateIRCore(nb::module_ &m) { nb::overload_cast( &PyOperationBase::print), nb::arg("state"), nb::arg("file") = nb::none(), - nb::arg("binary") = false, kOperationPrintStateDocstring) + nb::arg("binary") = false, + R"( + Prints the assembly form of the operation to a file like object. + + Args: + state: `AsmState` capturing the operation numbering and flags. + file: Optional file like object to write to. Defaults to sys.stdout. + binary: Whether to write `bytes` (True) or `str` (False). Defaults to False.)") .def("print", nb::overload_cast, std::optional, bool, bool, bool, bool, bool, bool, nb::object, @@ -3594,10 +3700,47 @@ void mlir::python::populateIRCore(nb::module_ &m) { nb::arg("use_name_loc_as_prefix") = false, nb::arg("assume_verified") = false, nb::arg("file") = nb::none(), nb::arg("binary") = false, nb::arg("skip_regions") = false, - kOperationPrintDocstring) + R"( + Prints the assembly form of the operation to a file like object. + + Args: + large_elements_limit: Whether to elide elements attributes above this + number of elements. Defaults to None (no limit). + large_resource_limit: Whether to elide resource attributes above this + number of characters. Defaults to None (no limit). If large_elements_limit + is set and this is None, the behavior will be to use large_elements_limit + as large_resource_limit. + enable_debug_info: Whether to print debug/location information. Defaults + to False. + pretty_debug_info: Whether to format debug information for easier reading + by a human (warning: the result is unparseable). Defaults to False. + print_generic_op_form: Whether to print the generic assembly forms of all + ops. Defaults to False. + use_local_scope: Whether to print in a way that is more optimized for + multi-threaded access but may not be consistent with how the overall + module prints. + use_name_loc_as_prefix: Whether to use location attributes (NameLoc) as + prefixes for the SSA identifiers. Defaults to False. + assume_verified: By default, if not printing generic form, the verifier + will be run and if it fails, generic form will be printed with a comment + about failed verification. While a reasonable default for interactive use, + for systematic use, it is often better for the caller to verify explicitly + and report failures in a more robust fashion. Set this to True if doing this + in order to avoid running a redundant verification. If the IR is actually + invalid, behavior is undefined. + file: The file like object to write to. Defaults to sys.stdout. + binary: Whether to write bytes (True) or str (False). Defaults to False. + skip_regions: Whether to skip printing regions. Defaults to False.)") .def("write_bytecode", &PyOperationBase::writeBytecode, nb::arg("file"), nb::arg("desired_version") = nb::none(), - kOperationPrintBytecodeDocstring) + R"( + Write the bytecode form of the operation to a file like object. + + Args: + file: The file like object to write to. + desired_version: Optional version of bytecode to emit. + Returns: + The bytecode writer status.)") .def("get_asm", &PyOperationBase::getAsm, // Careful: Lots of arguments must match up with get_asm method. 
nb::arg("binary") = false, @@ -3609,7 +3752,17 @@ void mlir::python::populateIRCore(nb::module_ &m) { nb::arg("use_local_scope") = false, nb::arg("use_name_loc_as_prefix") = false, nb::arg("assume_verified") = false, nb::arg("skip_regions") = false, - kOperationGetAsmDocstring) + R"( + Gets the assembly form of the operation with all options available. + + Args: + binary: Whether to return a bytes (True) or str (False) object. Defaults to + False. + ... others ...: See the print() method for common keyword arguments for + configuring the printout. + Returns: + Either a bytes or str object, depending on the setting of the `binary` + argument.)") .def("verify", &PyOperationBase::verify, "Verify the operation. Raises MLIRError if verification fails, and " "returns true otherwise.") @@ -3621,18 +3774,31 @@ void mlir::python::populateIRCore(nb::module_ &m) { "block.") .def("is_before_in_block", &PyOperationBase::isBeforeInBlock, nb::arg("other"), - "Given an operation 'other' that is within the same parent block, " - "return" - "whether the current operation is before 'other' in the operation " - "list" - "of the parent block.") + R"( + Checks if this operation is before another in the same block. + + Args: + other: Another operation in the same parent block. + + Returns: + True if this operation is before `other` in the operation list of the parent block.)") .def( "clone", [](PyOperationBase &self, const nb::object &ip) -> nb::typed { return self.getOperation().clone(ip); }, - nb::arg("ip") = nb::none()) + nb::arg("ip") = nb::none(), + R"( + Creates a deep copy of the operation. + + Args: + ip: Optional insertion point where the cloned operation should be inserted. + If None, the current insertion point is used. If False, the operation + remains detached. + + Returns: + A new Operation that is a clone of this operation.)") .def( "detach_from_parent", [](PyOperationBase &self) -> nb::typed { @@ -3653,13 +3819,24 @@ void mlir::python::populateIRCore(nb::module_ &m) { return operation.isAttached(); }, "Reports if the operation is attached to its parent block.") - .def("erase", [](PyOperationBase &self) { self.getOperation().erase(); }) + .def( + "erase", [](PyOperationBase &self) { self.getOperation().erase(); }, + R"( + Erases the operation and frees its memory. + + Note: + After erasing, any Python references to the operation become invalid.)") .def("walk", &PyOperationBase::walk, nb::arg("callback"), nb::arg("walk_order") = MlirWalkPostOrder, // clang-format off - nb::sig("def walk(self, callback: Callable[[Operation], WalkResult], walk_order: WalkOrder) -> None") + nb::sig("def walk(self, callback: Callable[[Operation], WalkResult], walk_order: WalkOrder) -> None"), // clang-format on - ); + R"( + Walks the operation tree with a callback function. + + Args: + callback: A callable that takes an Operation and returns a WalkResult. + walk_order: The order of traversal (PRE_ORDER or POST_ORDER).)"); nb::class_(m, "Operation") .def_static( @@ -3692,7 +3869,22 @@ void mlir::python::populateIRCore(nb::module_ &m) { nb::arg("operands") = nb::none(), nb::arg("attributes") = nb::none(), nb::arg("successors") = nb::none(), nb::arg("regions") = 0, nb::arg("loc") = nb::none(), nb::arg("ip") = nb::none(), - nb::arg("infer_type") = false, kOperationCreateDocstring) + nb::arg("infer_type") = false, + R"( + Creates a new operation. + + Args: + name: Operation name (e.g. `dialect.operation`). + results: Optional sequence of Type representing op result types. + operands: Optional operands of the operation. 
+ attributes: Optional Dict of {str: Attribute}. + successors: Optional List of Block for the operation's successors. + regions: Number of regions to create (default = 0). + loc: Optional Location object (defaults to resolve from context manager). + ip: Optional InsertionPoint (defaults to resolve from context manager or set to False to disable insertion, even with an insertion point set in the context manager). + infer_type: Whether to infer result types (default = False). + Returns: + A new detached Operation object. Detached operations can be added to blocks, which causes them to become attached.)") .def_static( "parse", [](const std::string &sourceStr, const std::string &sourceName, DefaultingPyMlirContext context) @@ -3705,18 +3897,30 @@ void mlir::python::populateIRCore(nb::module_ &m) { nb::arg("context") = nb::none(), "Parses an operation. Supports both text assembly format and binary " "bytecode format.") - .def_prop_ro(MLIR_PYTHON_CAPI_PTR_ATTR, &PyOperation::getCapsule) + .def_prop_ro(MLIR_PYTHON_CAPI_PTR_ATTR, &PyOperation::getCapsule, + "Gets a capsule wrapping the MlirOperation.") .def_static(MLIR_PYTHON_CAPI_FACTORY_ATTR, - &PyOperation::createFromCapsule) - .def_prop_ro("operation", - [](nb::object self) -> nb::typed { - return self; - }) - .def_prop_ro("opview", - [](PyOperation &self) -> nb::typed { - return self.createOpView(); - }) - .def_prop_ro("block", &PyOperation::getBlock) + &PyOperation::createFromCapsule, + "Creates an Operation from a capsule wrapping MlirOperation.") + .def_prop_ro( + "operation", + [](nb::object self) -> nb::typed { + return self; + }, + "Returns self (the operation).") + .def_prop_ro( + "opview", + [](PyOperation &self) -> nb::typed { + return self.createOpView(); + }, + R"( + Returns an OpView of this operation. + + Note: + If the operation has a registered and loaded dialect then this OpView will + be a concrete wrapper class.)") + .def_prop_ro("block", &PyOperation::getBlock, + "Returns the block containing this operation.") .def_prop_ro( "successors", [](PyOperationBase &self) { @@ -3830,7 +4034,7 @@ void mlir::python::populateIRCore(nb::module_ &m) { }, nb::arg("cls"), nb::arg("source"), nb::kw_only(), nb::arg("source_name") = "", nb::arg("context") = nb::none(), - "Parses a specific, generated OpView based on class level attributes"); + "Parses a specific, generated OpView based on class level attributes."); //---------------------------------------------------------------------------- // Mapping of PyRegion. @@ -3856,17 +4060,22 @@ void mlir::python::populateIRCore(nb::module_ &m) { return PyBlockIterator(self.getParentOperation(), firstBlock); }, "Iterates over blocks in the region.") - .def("__eq__", - [](PyRegion &self, PyRegion &other) { - return self.get().ptr == other.get().ptr; - }) - .def("__eq__", [](PyRegion &self, nb::object &other) { return false; }); + .def( + "__eq__", + [](PyRegion &self, PyRegion &other) { + return self.get().ptr == other.get().ptr; + }, + "Compares two regions for pointer equality.") + .def( + "__eq__", [](PyRegion &self, nb::object &other) { return false; }, + "Compares region with non-region object (always returns False)."); //---------------------------------------------------------------------------- // Mapping of PyBlock. 
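To illustrate `Operation.create` and the accessors above, a sketch that builds a detached op and attaches it via an insertion point; assumes the upstream `mlir` package (the op name `custom.op` is hypothetical, hence `allow_unregistered_dialects`):

from mlir.ir import Context, InsertionPoint, Location, Module, Operation

with Context() as ctx, Location.unknown():
    ctx.allow_unregistered_dialects = True
    m = Module.create()
    with InsertionPoint(m.body):
        # Created detached, then attached at the current insertion point.
        op = Operation.create("custom.op")
    print(op.name)         # -> custom.op
    print(op.parent.name)  # parent is the enclosing builtin.module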
//---------------------------------------------------------------------------- nb::class_(m, "Block") - .def_prop_ro(MLIR_PYTHON_CAPI_PTR_ATTR, &PyBlock::getCapsule) + .def_prop_ro(MLIR_PYTHON_CAPI_PTR_ATTR, &PyBlock::getCapsule, + "Gets a capsule wrapping the MlirBlock.") .def_prop_ro( "owner", [](PyBlock &self) -> nb::typed { @@ -3893,14 +4102,26 @@ void mlir::python::populateIRCore(nb::module_ &m) { mlirBlockAddArgument(self.get(), type, loc)); }, "type"_a, "loc"_a, - "Append an argument of the specified type to the block and returns " - "the newly added argument.") + R"( + Appends an argument of the specified type to the block. + + Args: + type: The type of the argument to add. + loc: The source location for the argument. + + Returns: + The newly added block argument.)") .def( "erase_argument", [](PyBlock &self, unsigned index) { return mlirBlockEraseArgument(self.get(), index); }, - "Erase the argument at 'index' and remove it from the argument list.") + nb::arg("index"), + R"( + Erases the argument at the specified index. + + Args: + index: The index of the argument to erase.)") .def_prop_ro( "operations", [](PyBlock &self) { @@ -3928,7 +4149,14 @@ void mlir::python::populateIRCore(nb::module_ &m) { mlirBlockDetach(b); mlirRegionAppendOwnedBlock(region.get(), b); }, - "Append this block to a region, transferring ownership if necessary") + nb::arg("region"), + R"( + Appends this block to a region. + + Transfers ownership if the block is currently owned by another region. + + Args: + region: The region to append the block to.)") .def( "create_before", [](PyBlock &self, const nb::args &pyArgTypes, @@ -3969,15 +4197,21 @@ void mlir::python::populateIRCore(nb::module_ &m) { firstOperation); }, "Iterates over operations in the block.") - .def("__eq__", - [](PyBlock &self, PyBlock &other) { - return self.get().ptr == other.get().ptr; - }) - .def("__eq__", [](PyBlock &self, nb::object &other) { return false; }) - .def("__hash__", - [](PyBlock &self) { - return static_cast(llvm::hash_value(self.get().ptr)); - }) + .def( + "__eq__", + [](PyBlock &self, PyBlock &other) { + return self.get().ptr == other.get().ptr; + }, + "Compares two blocks for pointer equality.") + .def( + "__eq__", [](PyBlock &self, nb::object &other) { return false; }, + "Compares block with non-block object (always returns False).") + .def( + "__hash__", + [](PyBlock &self) { + return static_cast(llvm::hash_value(self.get().ptr)); + }, + "Returns the hash value of the block.") .def( "__str__", [](PyBlock &self) { @@ -4000,8 +4234,13 @@ void mlir::python::populateIRCore(nb::module_ &m) { self.getParentOperation().getObject()); }, nb::arg("operation"), - "Appends an operation to this block. If the operation is currently " - "in another block, it will be moved.") + R"( + Appends an operation to this block. + + If the operation is currently in another block, it will be moved. 
+ + Args: + operation: The operation to append to the block.)") .def_prop_ro( "successors", [](PyBlock &self) { @@ -4022,10 +4261,12 @@ void mlir::python::populateIRCore(nb::module_ &m) { nb::class_(m, "InsertionPoint") .def(nb::init(), nb::arg("block"), "Inserts after the last operation but still inside the block.") - .def("__enter__", &PyInsertionPoint::contextEnter) + .def("__enter__", &PyInsertionPoint::contextEnter, + "Enters the insertion point as a context manager.") .def("__exit__", &PyInsertionPoint::contextExit, nb::arg("exc_type").none(), nb::arg("exc_value").none(), - nb::arg("traceback").none()) + nb::arg("traceback").none(), + "Exits the insertion point context manager.") .def_prop_ro_static( "current", [](nb::object & /*class*/) { @@ -4036,20 +4277,50 @@ void mlir::python::populateIRCore(nb::module_ &m) { }, nb::sig("def current(/) -> InsertionPoint"), "Gets the InsertionPoint bound to the current thread or raises " - "ValueError if none has been set") + "ValueError if none has been set.") .def(nb::init(), nb::arg("beforeOperation"), "Inserts before a referenced operation.") .def_static("at_block_begin", &PyInsertionPoint::atBlockBegin, - nb::arg("block"), "Inserts at the beginning of the block.") + nb::arg("block"), + R"( + Creates an insertion point at the beginning of a block. + + Args: + block: The block at whose beginning operations should be inserted. + + Returns: + An InsertionPoint at the block's beginning.)") .def_static("at_block_terminator", &PyInsertionPoint::atBlockTerminator, - nb::arg("block"), "Inserts before the block terminator.") + nb::arg("block"), + R"( + Creates an insertion point before a block's terminator. + + Args: + block: The block whose terminator to insert before. + + Returns: + An InsertionPoint before the terminator. + + Raises: + ValueError: If the block has no terminator.)") .def_static("after", &PyInsertionPoint::after, nb::arg("operation"), - "Inserts after the operation.") + R"( + Creates an insertion point immediately after an operation. + + Args: + operation: The operation after which to insert. + + Returns: + An InsertionPoint after the operation.)") .def("insert", &PyInsertionPoint::insert, nb::arg("operation"), - "Inserts an operation.") + R"( + Inserts an operation at this insertion point. + + Args: + operation: The operation to insert.)") .def_prop_ro( "block", [](PyInsertionPoint &self) { return self.getBlock(); }, - "Returns the block that this InsertionPoint points to.") + "Returns the block that this `InsertionPoint` points to.") .def_prop_ro( "ref_operation", [](PyInsertionPoint &self) @@ -4061,7 +4332,7 @@ void mlir::python::populateIRCore(nb::module_ &m) { }, "The reference operation before which new operations are " "inserted, or None if the insertion point is at the end of " - "the block"); + "the block."); //---------------------------------------------------------------------------- // Mapping of PyAttribute. @@ -4070,10 +4341,12 @@ void mlir::python::populateIRCore(nb::module_ &m) { // Delegate to the PyAttribute copy constructor, which will also lifetime // extend the backing context which owns the MlirAttribute. 
.def(nb::init(), nb::arg("cast_from_type"), - "Casts the passed attribute to the generic Attribute") - .def_prop_ro(MLIR_PYTHON_CAPI_PTR_ATTR, &PyAttribute::getCapsule) - .def_static(MLIR_PYTHON_CAPI_FACTORY_ATTR, - &PyAttribute::createFromCapsule) + "Casts the passed attribute to the generic `Attribute`.") + .def_prop_ro(MLIR_PYTHON_CAPI_PTR_ATTR, &PyAttribute::getCapsule, + "Gets a capsule wrapping the MlirAttribute.") + .def_static( + MLIR_PYTHON_CAPI_FACTORY_ATTR, &PyAttribute::createFromCapsule, + "Creates an Attribute from a capsule wrapping `MlirAttribute`.") .def_static( "parse", [](const std::string &attrSpec, DefaultingPyMlirContext context) @@ -4086,33 +4359,49 @@ void mlir::python::populateIRCore(nb::module_ &m) { return PyAttribute(context.get()->getRef(), attr).maybeDownCast(); }, nb::arg("asm"), nb::arg("context") = nb::none(), - "Parses an attribute from an assembly form. Raises an MLIRError on " + "Parses an attribute from an assembly form. Raises an `MLIRError` on " "failure.") .def_prop_ro( "context", [](PyAttribute &self) -> nb::typed { return self.getContext().getObject(); }, - "Context that owns the Attribute") - .def_prop_ro("type", - [](PyAttribute &self) -> nb::typed { - return PyType(self.getContext(), - mlirAttributeGetType(self)) - .maybeDownCast(); - }) + "Context that owns the `Attribute`.") + .def_prop_ro( + "type", + [](PyAttribute &self) -> nb::typed { + return PyType(self.getContext(), mlirAttributeGetType(self)) + .maybeDownCast(); + }, + "Returns the type of the `Attribute`.") .def( "get_named", [](PyAttribute &self, std::string name) { return PyNamedAttribute(self, std::move(name)); }, - nb::keep_alive<0, 1>(), "Binds a name to the attribute") - .def("__eq__", - [](PyAttribute &self, PyAttribute &other) { return self == other; }) - .def("__eq__", [](PyAttribute &self, nb::object &other) { return false; }) - .def("__hash__", - [](PyAttribute &self) { - return static_cast(llvm::hash_value(self.get().ptr)); - }) + nb::keep_alive<0, 1>(), + R"( + Binds a name to the attribute, creating a `NamedAttribute`. + + Args: + name: The name to bind to the `Attribute`. + + Returns: + A `NamedAttribute` with the given name and this attribute.)") + .def( + "__eq__", + [](PyAttribute &self, PyAttribute &other) { return self == other; }, + "Compares two attributes for equality.") + .def( + "__eq__", [](PyAttribute &self, nb::object &other) { return false; }, + "Compares attribute with non-attribute object (always returns " + "False).") + .def( + "__hash__", + [](PyAttribute &self) { + return static_cast(llvm::hash_value(self.get().ptr)); + }, + "Returns the hash value of the attribute.") .def( "dump", [](PyAttribute &self) { mlirAttributeDump(self); }, kDumpDocstring) @@ -4125,61 +4414,69 @@ void mlir::python::populateIRCore(nb::module_ &m) { return printAccum.join(); }, "Returns the assembly form of the Attribute.") - .def("__repr__", - [](PyAttribute &self) { - // Generally, assembly formats are not printed for __repr__ because - // this can cause exceptionally long debug output and exceptions. - // However, attribute values are generally considered useful and - // are printed. This may need to be re-evaluated if debug dumps end - // up being excessive. 
- PyPrintAccumulator printAccum; - printAccum.parts.append("Attribute("); - mlirAttributePrint(self, printAccum.getCallback(), - printAccum.getUserData()); - printAccum.parts.append(")"); - return printAccum.join(); - }) - .def_prop_ro("typeid", - [](PyAttribute &self) { - MlirTypeID mlirTypeID = mlirAttributeGetTypeID(self); - assert(!mlirTypeIDIsNull(mlirTypeID) && - "mlirTypeID was expected to be non-null."); - return PyTypeID(mlirTypeID); - }) - .def(MLIR_PYTHON_MAYBE_DOWNCAST_ATTR, - [](PyAttribute &self) -> nb::typed { - return self.maybeDownCast(); - }); + .def( + "__repr__", + [](PyAttribute &self) { + // Generally, assembly formats are not printed for __repr__ because + // this can cause exceptionally long debug output and exceptions. + // However, attribute values are generally considered useful and + // are printed. This may need to be re-evaluated if debug dumps end + // up being excessive. + PyPrintAccumulator printAccum; + printAccum.parts.append("Attribute("); + mlirAttributePrint(self, printAccum.getCallback(), + printAccum.getUserData()); + printAccum.parts.append(")"); + return printAccum.join(); + }, + "Returns a string representation of the attribute.") + .def_prop_ro( + "typeid", + [](PyAttribute &self) { + MlirTypeID mlirTypeID = mlirAttributeGetTypeID(self); + assert(!mlirTypeIDIsNull(mlirTypeID) && + "mlirTypeID was expected to be non-null."); + return PyTypeID(mlirTypeID); + }, + "Returns the `TypeID` of the attribute.") + .def( + MLIR_PYTHON_MAYBE_DOWNCAST_ATTR, + [](PyAttribute &self) -> nb::typed { + return self.maybeDownCast(); + }, + "Downcasts the attribute to a more specific attribute if possible."); //---------------------------------------------------------------------------- // Mapping of PyNamedAttribute //---------------------------------------------------------------------------- nb::class_(m, "NamedAttribute") - .def("__repr__", - [](PyNamedAttribute &self) { - PyPrintAccumulator printAccum; - printAccum.parts.append("NamedAttribute("); - printAccum.parts.append( - nb::str(mlirIdentifierStr(self.namedAttr.name).data, - mlirIdentifierStr(self.namedAttr.name).length)); - printAccum.parts.append("="); - mlirAttributePrint(self.namedAttr.attribute, - printAccum.getCallback(), - printAccum.getUserData()); - printAccum.parts.append(")"); - return printAccum.join(); - }) + .def( + "__repr__", + [](PyNamedAttribute &self) { + PyPrintAccumulator printAccum; + printAccum.parts.append("NamedAttribute("); + printAccum.parts.append( + nb::str(mlirIdentifierStr(self.namedAttr.name).data, + mlirIdentifierStr(self.namedAttr.name).length)); + printAccum.parts.append("="); + mlirAttributePrint(self.namedAttr.attribute, + printAccum.getCallback(), + printAccum.getUserData()); + printAccum.parts.append(")"); + return printAccum.join(); + }, + "Returns a string representation of the named attribute.") .def_prop_ro( "name", [](PyNamedAttribute &self) { return mlirIdentifierStr(self.namedAttr.name); }, - "The name of the NamedAttribute binding") + "The name of the `NamedAttribute` binding.") .def_prop_ro( "attr", [](PyNamedAttribute &self) { return self.namedAttr.attribute; }, nb::keep_alive<0, 1>(), nb::sig("def attr(self) -> Attribute"), - "The underlying generic attribute of the NamedAttribute binding"); + "The underlying generic attribute of the `NamedAttribute` binding."); //---------------------------------------------------------------------------- // Mapping of PyType. 
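A sketch of the `Attribute` and `NamedAttribute` APIs above, assuming the upstream `mlir` package:

from mlir.ir import Attribute, Context

with Context():
    attr = Attribute.parse("42 : i32")   # raises MLIRError on malformed input
    print(attr.type)                     # -> i32
    print(repr(attr))                    # -> Attribute(42 : i32)
    named = attr.get_named("value")      # binds a name, yielding a NamedAttribute
    print(named.name, named.attr)        # -> value 42 : i32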
@@ -4188,9 +4485,11 @@ void mlir::python::populateIRCore(nb::module_ &m) { // Delegate to the PyType copy constructor, which will also lifetime // extend the backing context which owns the MlirType. .def(nb::init(), nb::arg("cast_from_type"), - "Casts the passed type to the generic Type") - .def_prop_ro(MLIR_PYTHON_CAPI_PTR_ATTR, &PyType::getCapsule) - .def_static(MLIR_PYTHON_CAPI_FACTORY_ATTR, &PyType::createFromCapsule) + "Casts the passed type to the generic `Type`.") + .def_prop_ro(MLIR_PYTHON_CAPI_PTR_ATTR, &PyType::getCapsule, + "Gets a capsule wrapping the `MlirType`.") + .def_static(MLIR_PYTHON_CAPI_FACTORY_ATTR, &PyType::createFromCapsule, + "Creates a Type from a capsule wrapping `MlirType`.") .def_static( "parse", [](std::string typeSpec, @@ -4203,21 +4502,31 @@ void mlir::python::populateIRCore(nb::module_ &m) { return PyType(context.get()->getRef(), type).maybeDownCast(); }, nb::arg("asm"), nb::arg("context") = nb::none(), - kContextParseTypeDocstring) + R"( + Parses the assembly form of a type. + + Returns a Type object or raises an `MLIRError` if the type cannot be parsed. + + See also: https://mlir.llvm.org/docs/LangRef/#type-system)") .def_prop_ro( "context", [](PyType &self) -> nb::typed { return self.getContext().getObject(); }, - "Context that owns the Type") - .def("__eq__", [](PyType &self, PyType &other) { return self == other; }) + "Context that owns the `Type`.") + .def( + "__eq__", [](PyType &self, PyType &other) { return self == other; }, + "Compares two types for equality.") .def( "__eq__", [](PyType &self, nb::object &other) { return false; }, - nb::arg("other").none()) - .def("__hash__", - [](PyType &self) { - return static_cast(llvm::hash_value(self.get().ptr)); - }) + nb::arg("other").none(), + "Compares type with non-type object (always returns False).") + .def( + "__hash__", + [](PyType &self) { + return static_cast(llvm::hash_value(self.get().ptr)); + }, + "Returns the hash value of the `Type`.") .def( "dump", [](PyType &self) { mlirTypeDump(self); }, kDumpDocstring) .def( @@ -4228,60 +4537,81 @@ void mlir::python::populateIRCore(nb::module_ &m) { printAccum.getUserData()); return printAccum.join(); }, - "Returns the assembly form of the type.") - .def("__repr__", - [](PyType &self) { - // Generally, assembly formats are not printed for __repr__ because - // this can cause exceptionally long debug output and exceptions. - // However, types are an exception as they typically have compact - // assembly forms and printing them is useful. - PyPrintAccumulator printAccum; - printAccum.parts.append("Type("); - mlirTypePrint(self, printAccum.getCallback(), - printAccum.getUserData()); - printAccum.parts.append(")"); - return printAccum.join(); - }) - .def(MLIR_PYTHON_MAYBE_DOWNCAST_ATTR, - [](PyType &self) -> nb::typed { - return self.maybeDownCast(); - }) - .def_prop_ro("typeid", [](PyType &self) { - MlirTypeID mlirTypeID = mlirTypeGetTypeID(self); - if (!mlirTypeIDIsNull(mlirTypeID)) - return PyTypeID(mlirTypeID); - auto origRepr = nb::cast(nb::repr(nb::cast(self))); - throw nb::value_error( - (origRepr + llvm::Twine(" has no typeid.")).str().c_str()); - }); + "Returns the assembly form of the `Type`.") + .def( + "__repr__", + [](PyType &self) { + // Generally, assembly formats are not printed for __repr__ because + // this can cause exceptionally long debug output and exceptions. + // However, types are an exception as they typically have compact + // assembly forms and printing them is useful. 
+ PyPrintAccumulator printAccum; + printAccum.parts.append("Type("); + mlirTypePrint(self, printAccum.getCallback(), + printAccum.getUserData()); + printAccum.parts.append(")"); + return printAccum.join(); + }, + "Returns a string representation of the `Type`.") + .def( + MLIR_PYTHON_MAYBE_DOWNCAST_ATTR, + [](PyType &self) -> nb::typed { + return self.maybeDownCast(); + }, + "Downcasts the Type to a more specific `Type` if possible.") + .def_prop_ro( + "typeid", + [](PyType &self) { + MlirTypeID mlirTypeID = mlirTypeGetTypeID(self); + if (!mlirTypeIDIsNull(mlirTypeID)) + return PyTypeID(mlirTypeID); + auto origRepr = nb::cast(nb::repr(nb::cast(self))); + throw nb::value_error( + (origRepr + llvm::Twine(" has no typeid.")).str().c_str()); + }, + "Returns the `TypeID` of the `Type`, or raises `ValueError` if " + "`Type` has no " + "`TypeID`."); //---------------------------------------------------------------------------- // Mapping of PyTypeID. //---------------------------------------------------------------------------- nb::class_(m, "TypeID") - .def_prop_ro(MLIR_PYTHON_CAPI_PTR_ATTR, &PyTypeID::getCapsule) - .def_static(MLIR_PYTHON_CAPI_FACTORY_ATTR, &PyTypeID::createFromCapsule) + .def_prop_ro(MLIR_PYTHON_CAPI_PTR_ATTR, &PyTypeID::getCapsule, + "Gets a capsule wrapping the `MlirTypeID`.") + .def_static(MLIR_PYTHON_CAPI_FACTORY_ATTR, &PyTypeID::createFromCapsule, + "Creates a `TypeID` from a capsule wrapping `MlirTypeID`.") // Note, this tests whether the underlying TypeIDs are the same, // not whether the wrapper MlirTypeIDs are the same, nor whether // the Python objects are the same (i.e., PyTypeID is a value type). - .def("__eq__", - [](PyTypeID &self, PyTypeID &other) { return self == other; }) - .def("__eq__", - [](PyTypeID &self, const nb::object &other) { return false; }) + .def( + "__eq__", + [](PyTypeID &self, PyTypeID &other) { return self == other; }, + "Compares two `TypeID`s for equality.") + .def( + "__eq__", + [](PyTypeID &self, const nb::object &other) { return false; }, + "Compares TypeID with non-TypeID object (always returns False).") // Note, this gives the hash value of the underlying TypeID, not the // hash value of the Python object, nor the hash value of the // MlirTypeID wrapper. - .def("__hash__", [](PyTypeID &self) { - return static_cast(mlirTypeIDHashValue(self)); - }); + .def( + "__hash__", + [](PyTypeID &self) { + return static_cast(mlirTypeIDHashValue(self)); + }, + "Returns the hash value of the `TypeID`."); //---------------------------------------------------------------------------- // Mapping of Value. 
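Correspondingly for `Type` and `TypeID`, assuming the upstream `mlir` package:

from mlir.ir import Context, Type

with Context():
    t = Type.parse("vector<4xi32>")      # raises MLIRError on malformed input
    assert t != Type.parse("i32")        # __eq__ compares the underlying types
    # TypeIDs identify the kind of type, so two vector types share one.
    assert t.typeid == Type.parse("vector<8xi32>").typeid
    print(t)                             # assembly form: vector<4xi32>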
//---------------------------------------------------------------------------- nb::class_(m, "Value") - .def(nb::init(), nb::keep_alive<0, 1>(), nb::arg("value")) - .def_prop_ro(MLIR_PYTHON_CAPI_PTR_ATTR, &PyValue::getCapsule) - .def_static(MLIR_PYTHON_CAPI_FACTORY_ATTR, &PyValue::createFromCapsule) + .def(nb::init(), nb::keep_alive<0, 1>(), nb::arg("value"), + "Creates a Value reference from another `Value`.") + .def_prop_ro(MLIR_PYTHON_CAPI_PTR_ATTR, &PyValue::getCapsule, + "Gets a capsule wrapping the `MlirValue`.") + .def_static(MLIR_PYTHON_CAPI_FACTORY_ATTR, &PyValue::createFromCapsule, + "Creates a `Value` from a capsule wrapping `MlirValue`.") .def_prop_ro( "context", [](PyValue &self) -> nb::typed { @@ -4312,23 +4642,30 @@ void mlir::python::populateIRCore(nb::module_ &m) { assert(false && "Value must be a block argument or an op result"); return nb::none(); }, - // clang-format off - nb::sig("def owner(self) -> Operation | Block | None")) - // clang-format on - .def_prop_ro("uses", - [](PyValue &self) { - return PyOpOperandIterator( - mlirValueGetFirstUse(self.get())); - }) - .def("__eq__", - [](PyValue &self, PyValue &other) { - return self.get().ptr == other.get().ptr; - }) - .def("__eq__", [](PyValue &self, nb::object other) { return false; }) - .def("__hash__", - [](PyValue &self) { - return static_cast(llvm::hash_value(self.get().ptr)); - }) + "Returns the owner of the value (`Operation` for results, `Block` " + "for " + "arguments).") + .def_prop_ro( + "uses", + [](PyValue &self) { + return PyOpOperandIterator(mlirValueGetFirstUse(self.get())); + }, + "Returns an iterator over uses of this value.") + .def( + "__eq__", + [](PyValue &self, PyValue &other) { + return self.get().ptr == other.get().ptr; + }, + "Compares two values for pointer equality.") + .def( + "__eq__", [](PyValue &self, nb::object other) { return false; }, + "Compares value with non-value object (always returns False).") + .def( + "__hash__", + [](PyValue &self) { + return static_cast(llvm::hash_value(self.get().ptr)); + }, + "Returns the hash value of the value.") .def( "__str__", [](PyValue &self) { @@ -4339,7 +4676,13 @@ void mlir::python::populateIRCore(nb::module_ &m) { printAccum.parts.append(")"); return printAccum.join(); }, - kValueDunderStrDocstring) + R"( + Returns the string form of the value. + + If the value is a block argument, this is the assembly form of its type and the + position in the argument list. If the value is an operation result, this is + equivalent to printing the operation that produced it. + )") .def( "get_name", [](PyValue &self, bool useLocalScope, bool useNameLocAsPrefix) { @@ -4359,7 +4702,16 @@ void mlir::python::populateIRCore(nb::module_ &m) { return printAccum.join(); }, nb::arg("use_local_scope") = false, - nb::arg("use_name_loc_as_prefix") = false) + nb::arg("use_name_loc_as_prefix") = false, + R"( + Returns the string form of value as an operand. + + Args: + use_local_scope: Whether to use local scope for naming. + use_name_loc_as_prefix: Whether to use the location attribute (NameLoc) as prefix. 
+ + Returns: + The value's name as it appears in IR (e.g., `%0`, `%arg0`).)") .def( "get_name", [](PyValue &self, PyAsmState &state) { @@ -4370,25 +4722,29 @@ void mlir::python::populateIRCore(nb::module_ &m) { printAccum.getUserData()); return printAccum.join(); }, - nb::arg("state"), kGetNameAsOperand) - .def_prop_ro("type", - [](PyValue &self) -> nb::typed { - return PyType(self.getParentOperation()->getContext(), - mlirValueGetType(self.get())) - .maybeDownCast(); - }) + nb::arg("state"), + "Returns the string form of value as an operand (i.e., the ValueID).") + .def_prop_ro( + "type", + [](PyValue &self) -> nb::typed { + return PyType(self.getParentOperation()->getContext(), + mlirValueGetType(self.get())) + .maybeDownCast(); + }, + "Returns the type of the value.") .def( "set_type", [](PyValue &self, const PyType &type) { - return mlirValueSetType(self.get(), type); + mlirValueSetType(self.get(), type); }, - nb::arg("type")) + nb::arg("type"), "Sets the type of the value.") .def( "replace_all_uses_with", [](PyValue &self, PyValue &with) { mlirValueReplaceAllUsesOfWith(self.get(), with.get()); }, - kValueReplaceAllUsesWithDocstring) + "Replace all uses of value with the new value, updating anything in " + "the IR that uses `self` to use the other value instead.") .def( "replace_all_uses_except", [](PyValue &self, PyValue &with, PyOperation &exception) { @@ -4434,10 +4790,12 @@ void mlir::python::populateIRCore(nb::module_ &m) { }, nb::arg("with_"), nb::arg("exceptions"), kValueReplaceAllUsesExceptDocstring) - .def(MLIR_PYTHON_MAYBE_DOWNCAST_ATTR, - [](PyValue &self) -> nb::typed { - return self.maybeDownCast(); - }) + .def( + MLIR_PYTHON_MAYBE_DOWNCAST_ATTR, + [](PyValue &self) -> nb::typed { + return self.maybeDownCast(); + }, + "Downcasts the `Value` to a more specific kind if possible.") .def_prop_ro( "location", [](MlirValue self) { @@ -4445,7 +4803,7 @@ void mlir::python::populateIRCore(nb::module_ &m) { PyMlirContext::forContext(mlirValueGetContext(self)), mlirValueGetLocation(self)); }, - "Returns the source location the value"); + "Returns the source location of the value."); PyBlockArgument::bind(m); PyOpResult::bind(m); @@ -4453,43 +4811,105 @@ void mlir::python::populateIRCore(nb::module_ &m) { nb::class_(m, "AsmState") .def(nb::init(), nb::arg("value"), - nb::arg("use_local_scope") = false) + nb::arg("use_local_scope") = false, + R"( + Creates an `AsmState` for consistent SSA value naming. + + Args: + value: The value to create state for. + use_local_scope: Whether to use local scope for naming.)") .def(nb::init(), nb::arg("op"), - nb::arg("use_local_scope") = false); + nb::arg("use_local_scope") = false, + R"( + Creates an AsmState for consistent SSA value naming. + + Args: + op: The operation to create state for. + use_local_scope: Whether to use local scope for naming.)"); //---------------------------------------------------------------------------- // Mapping of SymbolTable. 
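A sketch of the `Value` naming and use-replacement APIs above, again with hypothetical `custom.*` op names and assuming the upstream `mlir` package:

from mlir.ir import (AsmState, Context, InsertionPoint, Location, Module,
                     Operation, Type)

with Context() as ctx, Location.unknown():
    ctx.allow_unregistered_dialects = True
    m = Module.create()
    i32 = Type.parse("i32")
    with InsertionPoint(m.body):
        a = Operation.create("custom.a", results=[i32]).results[0]
        b = Operation.create("custom.b", results=[i32]).results[0]
        user = Operation.create("custom.use", operands=[a])
    state = AsmState(a)           # consistent SSA numbering across get_name calls
    print(a.get_name(state))      # e.g. %0
    a.replace_all_uses_with(b)    # 'user' now consumes b instead of a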
//---------------------------------------------------------------------------- nb::class_(m, "SymbolTable") - .def(nb::init()) - .def("__getitem__", - [](PySymbolTable &self, - const std::string &name) -> nb::typed { - return self.dunderGetItem(name); - }) - .def("insert", &PySymbolTable::insert, nb::arg("operation")) - .def("erase", &PySymbolTable::erase, nb::arg("operation")) - .def("__delitem__", &PySymbolTable::dunderDel) - .def("__contains__", - [](PySymbolTable &table, const std::string &name) { - return !mlirOperationIsNull(mlirSymbolTableLookup( - table, mlirStringRefCreate(name.data(), name.length()))); - }) + .def(nb::init(), + R"( + Creates a symbol table for an operation. + + Args: + operation: The `Operation` that defines a symbol table (e.g., a `ModuleOp`). + + Raises: + TypeError: If the operation is not a symbol table.)") + .def( + "__getitem__", + [](PySymbolTable &self, + const std::string &name) -> nb::typed { + return self.dunderGetItem(name); + }, + R"( + Looks up a symbol by name in the symbol table. + + Args: + name: The name of the symbol to look up. + + Returns: + The operation defining the symbol. + + Raises: + KeyError: If the symbol is not found.)") + .def("insert", &PySymbolTable::insert, nb::arg("operation"), + R"( + Inserts a symbol operation into the symbol table. + + Args: + operation: An operation with a symbol name to insert. + + Returns: + The symbol name attribute of the inserted operation. + + Raises: + ValueError: If the operation does not have a symbol name.)") + .def("erase", &PySymbolTable::erase, nb::arg("operation"), + R"( + Erases a symbol operation from the symbol table. + + Args: + operation: The symbol operation to erase. + + Note: + The operation is also erased from the IR and invalidated.)") + .def("__delitem__", &PySymbolTable::dunderDel, + "Deletes a symbol by name from the symbol table.") + .def( + "__contains__", + [](PySymbolTable &table, const std::string &name) { + return !mlirOperationIsNull(mlirSymbolTableLookup( + table, mlirStringRefCreate(name.data(), name.length()))); + }, + "Checks if a symbol with the given name exists in the table.") // Static helpers. .def_static("set_symbol_name", &PySymbolTable::setSymbolName, - nb::arg("symbol"), nb::arg("name")) + nb::arg("symbol"), nb::arg("name"), + "Sets the symbol name for a symbol operation.") .def_static("get_symbol_name", &PySymbolTable::getSymbolName, - nb::arg("symbol")) + nb::arg("symbol"), + "Gets the symbol name from a symbol operation.") .def_static("get_visibility", &PySymbolTable::getVisibility, - nb::arg("symbol")) + nb::arg("symbol"), + "Gets the visibility attribute of a symbol operation.") .def_static("set_visibility", &PySymbolTable::setVisibility, - nb::arg("symbol"), nb::arg("visibility")) + nb::arg("symbol"), nb::arg("visibility"), + "Sets the visibility attribute of a symbol operation.") .def_static("replace_all_symbol_uses", &PySymbolTable::replaceAllSymbolUses, nb::arg("old_symbol"), - nb::arg("new_symbol"), nb::arg("from_op")) + nb::arg("new_symbol"), nb::arg("from_op"), + "Replaces all uses of a symbol with a new symbol name within " + "the given operation.") .def_static("walk_symbol_tables", &PySymbolTable::walkSymbolTables, nb::arg("from_op"), nb::arg("all_sym_uses_visible"), - nb::arg("callback")); + nb::arg("callback"), + "Walks symbol tables starting from an operation with a " + "callback function."); // Container bindings. 
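And for `SymbolTable`, a sketch assuming the upstream `mlir` package with the func dialect available:

from mlir.ir import Context, Module, SymbolTable

with Context():
    m = Module.parse("func.func private @foo()")
    table = SymbolTable(m.operation)   # module ops define a symbol table
    assert "foo" in table              # __contains__ does a symbol lookup
    foo = table["foo"]                 # raises KeyError if absent
    print(SymbolTable.get_symbol_name(foo))  # -> "foo"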
PyBlockArgumentList::bind(m); diff --git a/mlir/lib/Conversion/XeGPUToXeVM/XeGPUToXeVM.cpp b/mlir/lib/Conversion/XeGPUToXeVM/XeGPUToXeVM.cpp index de552ce22ef62..705298f497d20 100644 --- a/mlir/lib/Conversion/XeGPUToXeVM/XeGPUToXeVM.cpp +++ b/mlir/lib/Conversion/XeGPUToXeVM/XeGPUToXeVM.cpp @@ -186,9 +186,6 @@ class CreateNdDescToXeVMPattern SmallVector mixedSizes = op.getMixedSizes(); // Descriptor shape is expected to be 2D. int64_t rank = mixedSizes.size(); - if (rank != 2) - return rewriter.notifyMatchFailure(op, "Expected 2D shape."); - auto sourceTy = source.getType(); auto sourceMemrefTy = dyn_cast(sourceTy); // If source is a memref, we need to extract the aligned pointer as index. @@ -199,8 +196,19 @@ class CreateNdDescToXeVMPattern } baseAddr = memref::ExtractAlignedPointerAsIndexOp::create(rewriter, loc, source); + // Cast index to i64. + baseAddr = arith::IndexCastUIOp::create(rewriter, loc, i64Ty, baseAddr); } else { baseAddr = adaptor.getSource(); + if (baseAddr.getType() != i64Ty) { + // Pointer type may be i32. Cast to i64 if needed. + baseAddr = arith::ExtUIOp::create(rewriter, loc, i64Ty, baseAddr); + } + } + // 1D tensor descriptor is just the base address. + if (rank == 1) { + rewriter.replaceOp(op, baseAddr); + return success(); } // Utility for creating offset values from op fold result. auto createOffset = [&](SmallVector &ofrVec, @@ -215,13 +223,6 @@ class CreateNdDescToXeVMPattern // Get shape values from op fold results. baseShapeW = createOffset(mixedSizes, 1); baseShapeH = createOffset(mixedSizes, 0); - if (sourceMemrefTy) { - // Cast index to i64. - baseAddr = arith::IndexCastUIOp::create(rewriter, loc, i64Ty, baseAddr); - } else if (baseAddr.getType() != i64Ty) { - // Pointer type may be i32. Cast to i64 if needed. - baseAddr = arith::ExtUIOp::create(rewriter, loc, i64Ty, baseAddr); - } // Populate payload. Value payLoadAsI64 = vector::BitCastOp::create(rewriter, loc, payloadI64Ty, payload); @@ -257,108 +258,175 @@ class LoadStorePrefetchNdToXeVMPattern : public OpConversionPattern { ConversionPatternRewriter &rewriter) const override { auto mixedOffsets = op.getMixedOffsets(); int64_t opOffsetsSize = mixedOffsets.size(); - if (opOffsetsSize != 2) - return rewriter.notifyMatchFailure(op, "Expected 2D offsets."); auto loc = op.getLoc(); auto ctxt = rewriter.getContext(); auto tdesc = adaptor.getTensorDesc(); auto tdescTy = op.getTensorDescType(); - if (tdescTy.getRank() != 2) - return rewriter.notifyMatchFailure(op, "Expected 2D tensor descriptor."); + auto tileRank = tdescTy.getRank(); + if (opOffsetsSize != tileRank) + return rewriter.notifyMatchFailure( + op, "Expected offset rank to match descriptor rank."); auto elemType = tdescTy.getElementType(); auto elemBitSize = elemType.getIntOrFloatBitWidth(); if (elemBitSize % 8 != 0) return rewriter.notifyMatchFailure( op, "Expected element type bit width to be multiple of 8."); - VectorType payloadI64Ty = VectorType::get(4, rewriter.getI64Type()); - Value payLoadAsI64 = - vector::BitCastOp::create(rewriter, loc, payloadI64Ty, tdesc); - Value basePtr = vector::ExtractOp::create( - rewriter, loc, payLoadAsI64, static_cast(NdTdescOffset::BasePtr)); - Value baseShapeW = vector::ExtractOp::create( - rewriter, loc, tdesc, static_cast(NdTdescOffset::BaseShapeW)); - Value baseShapeH = vector::ExtractOp::create( - rewriter, loc, tdesc, static_cast(NdTdescOffset::BaseShapeH)); - // Offsets are provided by the op. - // convert them to i32. 
- Value offsetW = - getValueOrCreateConstantIntOp(rewriter, loc, mixedOffsets[1]); - offsetW = getValueOrCreateCastToIndexLike(rewriter, loc, - rewriter.getI32Type(), offsetW); - Value offsetH = - getValueOrCreateConstantIntOp(rewriter, loc, mixedOffsets[0]); - offsetH = getValueOrCreateCastToIndexLike(rewriter, loc, - rewriter.getI32Type(), offsetH); // Get address space from tensor descriptor memory space. auto ptrTypeLLVM = LLVM::LLVMPointerType::get( ctxt, getNumericXeVMAddrSpace(tdescTy.getMemorySpace())); - // Convert base pointer (i64) to LLVM pointer type. - Value basePtrLLVM = - LLVM::IntToPtrOp::create(rewriter, loc, ptrTypeLLVM, basePtr); - // Compute element byte size and surface width in bytes. - Value elemByteSize = arith::ConstantIntOp::create( - rewriter, loc, rewriter.getI32Type(), elemBitSize / 8); - Value surfaceW = - arith::MulIOp::create(rewriter, loc, baseShapeW, elemByteSize); - - // Get tile sizes and vblocks from the tensor descriptor type. - auto tileW = tdescTy.getDimSize(1); - auto tileH = tdescTy.getDimSize(0); - int32_t vblocks = tdescTy.getArrayLength(); - if constexpr (std::is_same_v) { - Value src = adaptor.getValue(); - // If store value is a scalar, get value from op instead of adaptor. - // Adaptor might have optimized away single element vector - if (src.getType().isIntOrFloat()) { - src = op.getValue(); - } - VectorType srcVecTy = dyn_cast(src.getType()); - if (!srcVecTy) - return rewriter.notifyMatchFailure( - op, "Expected store value to be a vector type."); - // Get flat vector type of integer type with matching element bit size. - VectorType newSrcVecTy = - encodeVectorTypeTo(srcVecTy, rewriter.getIntegerType(elemBitSize)); - if (srcVecTy != newSrcVecTy) - src = vector::BitCastOp::create(rewriter, loc, newSrcVecTy, src); - auto storeCacheControl = - translateStoreXeGPUCacheHint(op.getL1Hint(), op.getL3Hint()); - xevm::BlockStore2dOp::create( - rewriter, loc, basePtrLLVM, surfaceW, baseShapeH, surfaceW, offsetW, - offsetH, elemBitSize, tileW, tileH, src, - xevm::StoreCacheControlAttr::get(ctxt, storeCacheControl)); - rewriter.eraseOp(op); - } else { - auto loadCacheControl = - translateLoadXeGPUCacheHint(op.getL1Hint(), op.getL3Hint()); - if constexpr (std::is_same_v) { - xevm::BlockPrefetch2dOp::create( + if (tileRank == 2) { + // Compute element byte size. + Value elemByteSize = arith::ConstantIntOp::create( + rewriter, loc, rewriter.getI32Type(), elemBitSize / 8); + VectorType payloadI64Ty = VectorType::get(4, rewriter.getI64Type()); + Value payLoadAsI64 = + vector::BitCastOp::create(rewriter, loc, payloadI64Ty, tdesc); + Value basePtr = + vector::ExtractOp::create(rewriter, loc, payLoadAsI64, + static_cast(NdTdescOffset::BasePtr)); + Value baseShapeW = vector::ExtractOp::create( + rewriter, loc, tdesc, static_cast(NdTdescOffset::BaseShapeW)); + Value baseShapeH = vector::ExtractOp::create( + rewriter, loc, tdesc, static_cast(NdTdescOffset::BaseShapeH)); + // Offsets are provided by the op. + // convert them to i32. + Value offsetW = + getValueOrCreateConstantIntOp(rewriter, loc, mixedOffsets[1]); + offsetW = getValueOrCreateCastToIndexLike(rewriter, loc, + rewriter.getI32Type(), offsetW); + Value offsetH = + getValueOrCreateConstantIntOp(rewriter, loc, mixedOffsets[0]); + offsetH = getValueOrCreateCastToIndexLike(rewriter, loc, + rewriter.getI32Type(), offsetH); + // Convert base pointer (i64) to LLVM pointer type. + Value basePtrLLVM = + LLVM::IntToPtrOp::create(rewriter, loc, ptrTypeLLVM, basePtr); + // Compute width in bytes. 
+ Value surfaceW = + arith::MulIOp::create(rewriter, loc, baseShapeW, elemByteSize); + + // Get tile width from the tensor descriptor type. + auto tileW = tdescTy.getDimSize(tileRank - 1); + // Get tile height from the tensor descriptor type. + auto tileH = tdescTy.getDimSize(0); + // Get vblocks from the tensor descriptor type. + int32_t vblocks = tdescTy.getArrayLength(); + if constexpr (std::is_same_v) { + Value src = adaptor.getValue(); + // If store value is a scalar, get value from op instead of adaptor. + // Adaptor might have optimized away single element vector + if (src.getType().isIntOrFloat()) { + src = op.getValue(); + } + VectorType srcVecTy = dyn_cast(src.getType()); + if (!srcVecTy) + return rewriter.notifyMatchFailure( + op, "Expected store value to be a vector type."); + // Get flat vector type of integer type with matching element bit size. + VectorType newSrcVecTy = + encodeVectorTypeTo(srcVecTy, rewriter.getIntegerType(elemBitSize)); + if (srcVecTy != newSrcVecTy) + src = vector::BitCastOp::create(rewriter, loc, newSrcVecTy, src); + auto storeCacheControl = + translateStoreXeGPUCacheHint(op.getL1Hint(), op.getL3Hint()); + xevm::BlockStore2dOp::create( rewriter, loc, basePtrLLVM, surfaceW, baseShapeH, surfaceW, offsetW, - offsetH, elemBitSize, tileW, tileH, vblocks, - xevm::LoadCacheControlAttr::get(ctxt, loadCacheControl)); + offsetH, elemBitSize, tileW, tileH, src, + xevm::StoreCacheControlAttr::get(ctxt, storeCacheControl)); rewriter.eraseOp(op); } else { - VectorType dstVecTy = cast(op.getValue().getType()); - const bool vnni = op.getPacked().value_or(false); - auto transposeValue = op.getTranspose(); - bool transpose = - transposeValue.has_value() && transposeValue.value()[0] == 1; - VectorType loadedTy = encodeVectorTypeTo( - dstVecTy, vnni ? rewriter.getI32Type() - : rewriter.getIntegerType(elemBitSize)); - - Value resultFlatVec = xevm::BlockLoad2dOp::create( - rewriter, loc, loadedTy, basePtrLLVM, surfaceW, baseShapeH, - surfaceW, offsetW, offsetH, elemBitSize, tileW, tileH, vblocks, - transpose, vnni, + auto loadCacheControl = + translateLoadXeGPUCacheHint(op.getL1Hint(), op.getL3Hint()); + if constexpr (std::is_same_v) { + xevm::BlockPrefetch2dOp::create( + rewriter, loc, basePtrLLVM, surfaceW, baseShapeH, surfaceW, + offsetW, offsetH, elemBitSize, tileW, tileH, vblocks, + xevm::LoadCacheControlAttr::get(ctxt, loadCacheControl)); + rewriter.eraseOp(op); + } else { + VectorType dstVecTy = cast(op.getValue().getType()); + const bool vnni = op.getPacked().value_or(false); + auto transposeValue = op.getTranspose(); + bool transpose = + transposeValue.has_value() && transposeValue.value()[0] == 1; + VectorType loadedTy = encodeVectorTypeTo( + dstVecTy, vnni ? rewriter.getI32Type() + : rewriter.getIntegerType(elemBitSize)); + + Value resultFlatVec = xevm::BlockLoad2dOp::create( + rewriter, loc, loadedTy, basePtrLLVM, surfaceW, baseShapeH, + surfaceW, offsetW, offsetH, elemBitSize, tileW, tileH, vblocks, + transpose, vnni, + xevm::LoadCacheControlAttr::get(ctxt, loadCacheControl)); + resultFlatVec = vector::BitCastOp::create( + rewriter, loc, + encodeVectorTypeTo(loadedTy, dstVecTy.getElementType()), + resultFlatVec); + rewriter.replaceOp(op, resultFlatVec); + } + } + } else { + // 1D tensor descriptor. + // `tdesc` represents base address as i64 + // Offset in number of elements, need to multiply by element byte size. + // Compute byte offset. 
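      // (for instance, an element offset of 96 over f32 is 96 * 4 = 384 bytes,
      // the constant checked in the loadstore_1d.mlir test added below)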
+ // byteOffset = offset * elementByteSize + Value offset = + getValueOrCreateConstantIntOp(rewriter, loc, mixedOffsets[0]); + offset = getValueOrCreateCastToIndexLike(rewriter, loc, + rewriter.getI64Type(), offset); + // Compute element byte size. + Value elemByteSize = arith::ConstantIntOp::create( + rewriter, loc, rewriter.getI64Type(), elemBitSize / 8); + Value byteOffset = + rewriter.createOrFold(loc, offset, elemByteSize); + // Final address = basePtr + byteOffset + Value finalAddrI64 = rewriter.createOrFold( + loc, tdesc, + getValueOrCreateCastToIndexLike(rewriter, loc, rewriter.getI64Type(), + byteOffset)); + // Convert base pointer (i64) to LLVM pointer type. + Value finalPtrLLVM = + LLVM::IntToPtrOp::create(rewriter, loc, ptrTypeLLVM, finalAddrI64); + if constexpr (std::is_same_v) { + Value src = adaptor.getValue(); + // If store value is a scalar, get value from op instead of adaptor. + // Adaptor might have optimized away single element vector + if (src.getType().isIntOrFloat()) { + src = op.getValue(); + } + VectorType srcVecTy = dyn_cast(src.getType()); + if (!srcVecTy) + return rewriter.notifyMatchFailure( + op, "Expected store value to be a vector type."); + // Get flat vector type of integer type with matching element bit size. + VectorType newSrcVecTy = + encodeVectorTypeTo(srcVecTy, rewriter.getIntegerType(elemBitSize)); + if (srcVecTy != newSrcVecTy) + src = vector::BitCastOp::create(rewriter, loc, newSrcVecTy, src); + auto storeCacheControl = + translateStoreXeGPUCacheHint(op.getL1Hint(), op.getL3Hint()); + rewriter.replaceOpWithNewOp( + op, finalPtrLLVM, src, + xevm::StoreCacheControlAttr::get(ctxt, storeCacheControl)); + } else if constexpr (std::is_same_v) { + auto loadCacheControl = + translateLoadXeGPUCacheHint(op.getL1Hint(), op.getL3Hint()); + VectorType resTy = cast(op.getValue().getType()); + VectorType loadedTy = + encodeVectorTypeTo(resTy, rewriter.getIntegerType(elemBitSize)); + Value load = xevm::BlockLoadOp::create( + rewriter, loc, loadedTy, finalPtrLLVM, xevm::LoadCacheControlAttr::get(ctxt, loadCacheControl)); - resultFlatVec = vector::BitCastOp::create( - rewriter, loc, - encodeVectorTypeTo(loadedTy, dstVecTy.getElementType()), - resultFlatVec); - rewriter.replaceOp(op, resultFlatVec); + if (loadedTy != resTy) + load = vector::BitCastOp::create(rewriter, loc, resTy, load); + rewriter.replaceOp(op, load); + } else { + return rewriter.notifyMatchFailure( + op, "Unsupported operation: xegpu.prefetch_nd with tensor " + "descriptor rank == 1"); } } return success(); @@ -929,7 +997,10 @@ struct ConvertXeGPUToXeVMPass return VectorType::get(sum, elemType); }); typeConverter.addConversion([&](xegpu::TensorDescType type) -> Type { + // Scattered descriptors are not supported in XeVM lowering. 
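      // Rank decides the converted type: rank-1 descriptors become a bare i64
      // address, everything else keeps the 8xi32 payload vector; schematically:
      //   !xegpu.tensor_desc<32xf32>   -> i64
      //   !xegpu.tensor_desc<8x16xf32> -> vector<8xi32>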
if (type.isScattered()) + return {}; + if (type.getRank() == 1) return IntegerType::get(&getContext(), 64); auto i32Type = IntegerType::get(&getContext(), 32); return VectorType::get(8, i32Type); diff --git a/mlir/lib/Dialect/GPU/Pipelines/GPUToXeVMPipeline.cpp b/mlir/lib/Dialect/GPU/Pipelines/GPUToXeVMPipeline.cpp index 1a1485ba2e02c..b097d3a0c9686 100644 --- a/mlir/lib/Dialect/GPU/Pipelines/GPUToXeVMPipeline.cpp +++ b/mlir/lib/Dialect/GPU/Pipelines/GPUToXeVMPipeline.cpp @@ -63,13 +63,20 @@ void buildGPUPassPipeline(OpPassManager &pm, if (options.xegpuOpLevel == "workgroup") { pm.addNestedPass(xegpu::createXeGPUWgToSgDistribute()); pm.addNestedPass(createCSEPass()); + xegpu::XeGPUPropagateLayoutOptions layoutOptions; + layoutOptions.layoutKind = "inst"; + pm.addNestedPass( + xegpu::createXeGPUPropagateLayout(layoutOptions)); pm.addNestedPass(xegpu::createXeGPUBlocking()); pm.addNestedPass(createCanonicalizerPass()); pm.addNestedPass(createCSEPass()); } if (options.xegpuOpLevel == "subgroup" || options.xegpuOpLevel == "workgroup") { - pm.addNestedPass(xegpu::createXeGPUPropagateLayout()); + xegpu::XeGPUPropagateLayoutOptions layoutOptions; + layoutOptions.layoutKind = "lane"; + pm.addNestedPass( + xegpu::createXeGPUPropagateLayout(layoutOptions)); pm.addNestedPass(xegpu::createXeGPUSubgroupDistribute()); pm.addNestedPass(createCanonicalizerPass()); pm.addNestedPass(createCSEPass()); diff --git a/mlir/lib/Dialect/MemRef/Transforms/ResolveShapedTypeResultDims.cpp b/mlir/lib/Dialect/MemRef/Transforms/ResolveShapedTypeResultDims.cpp index 6a81a15f30e47..c498c8a60bf6e 100644 --- a/mlir/lib/Dialect/MemRef/Transforms/ResolveShapedTypeResultDims.cpp +++ b/mlir/lib/Dialect/MemRef/Transforms/ResolveShapedTypeResultDims.cpp @@ -90,17 +90,16 @@ struct DimOfReifyRankedShapedTypeOpInterface : public OpRewritePattern { if (!dimIndex) return failure(); - ReifiedRankedShapedTypeDims reifiedResultShapes; - if (failed(reifyResultShapes(rewriter, dimValue.getOwner(), - reifiedResultShapes))) + FailureOr replacement = reifyDimOfResult( + rewriter, dimValue.getOwner(), dimValue.getResultNumber(), *dimIndex); + if (failed(replacement)) return failure(); - unsigned resultNumber = dimValue.getResultNumber(); - // Do not apply pattern if the IR is invalid (dim out of bounds). - if ((size_t)(*dimIndex) >= reifiedResultShapes[resultNumber].size()) - return rewriter.notifyMatchFailure(dimOp, "dimension is out of bounds"); - Value replacement = getValueOrCreateConstantIndexOp( - rewriter, dimOp.getLoc(), reifiedResultShapes[resultNumber][*dimIndex]); - rewriter.replaceOp(dimOp, replacement); + // Check if the OpFoldResult is empty (unreifiable dimension). 
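    // (reifyDimOfResult reifies only the queried dimension instead of every
    // result shape; an empty OpFoldResult means the producer could not
    // materialize that dimension, so the pattern bails out rather than
    // asserting. For example, a `tensor.dim %result, %c1` query now asks the
    // defining op for just result dim 1.)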
+    if (!replacement.value())
+      return failure();
+    Value replacementVal = getValueOrCreateConstantIndexOp(
+        rewriter, dimOp.getLoc(), replacement.value());
+    rewriter.replaceOp(dimOp, replacementVal);
     return success();
   }
 };
@@ -166,12 +165,14 @@ namespace {
 struct ResolveRankedShapeTypeResultDimsPass final
     : public memref::impl::ResolveRankedShapeTypeResultDimsPassBase<
           ResolveRankedShapeTypeResultDimsPass> {
+  using Base::Base;
   void runOnOperation() override;
 };
 
 struct ResolveShapedTypeResultDimsPass final
     : public memref::impl::ResolveShapedTypeResultDimsPassBase<
           ResolveShapedTypeResultDimsPass> {
+  using Base::Base;
   void runOnOperation() override;
 };
@@ -195,14 +196,22 @@ void memref::populateResolveShapedTypeResultDimsPatterns(
 void ResolveRankedShapeTypeResultDimsPass::runOnOperation() {
   RewritePatternSet patterns(&getContext());
   memref::populateResolveRankedShapedTypeResultDimsPatterns(patterns);
-  if (failed(applyPatternsGreedily(getOperation(), std::move(patterns))))
+  auto result = applyPatternsGreedily(getOperation(), std::move(patterns));
+  if (errorOnPatternIterationLimit && failed(result)) {
+    getOperation()->emitOpError(
+        "dim operation resolution hit pattern iteration limit");
     return signalPassFailure();
+  }
 }
 
 void ResolveShapedTypeResultDimsPass::runOnOperation() {
   RewritePatternSet patterns(&getContext());
   memref::populateResolveRankedShapedTypeResultDimsPatterns(patterns);
   memref::populateResolveShapedTypeResultDimsPatterns(patterns);
-  if (failed(applyPatternsGreedily(getOperation(), std::move(patterns))))
+  auto result = applyPatternsGreedily(getOperation(), std::move(patterns));
+  if (errorOnPatternIterationLimit && failed(result)) {
+    getOperation()->emitOpError(
+        "dim operation resolution hit pattern iteration limit");
     return signalPassFailure();
+  }
 }
diff --git a/mlir/lib/Dialect/Tensor/IR/TensorInferTypeOpInterfaceImpl.cpp b/mlir/lib/Dialect/Tensor/IR/TensorInferTypeOpInterfaceImpl.cpp
index 4ec13e189f621..686f6eed1f8c7 100644
--- a/mlir/lib/Dialect/Tensor/IR/TensorInferTypeOpInterfaceImpl.cpp
+++ b/mlir/lib/Dialect/Tensor/IR/TensorInferTypeOpInterfaceImpl.cpp
@@ -77,6 +77,9 @@ namespace {
 struct ReifyExpandShapeOp
     : public ReifyRankedShapedTypeOpInterface::ExternalModel {
+  using Base =
+      ReifyRankedShapedTypeOpInterface::ExternalModel;
   LogicalResult
   reifyResultShapes(Operation *op, OpBuilder &b,
                     ReifiedRankedShapedTypeDims &reifyResultShapes) const {
diff --git a/mlir/lib/Dialect/XeGPU/TransformOps/XeGPUTransformOps.cpp b/mlir/lib/Dialect/XeGPU/TransformOps/XeGPUTransformOps.cpp
index 8943ba09d9c34..5fdd8534e4e51 100644
--- a/mlir/lib/Dialect/XeGPU/TransformOps/XeGPUTransformOps.cpp
+++ b/mlir/lib/Dialect/XeGPU/TransformOps/XeGPUTransformOps.cpp
@@ -13,6 +13,9 @@
 #include
 
+#include "llvm/Support/DebugLog.h"
+#define DEBUG_TYPE "xegpu-transforms"
+
 using namespace mlir;
 using namespace mlir::transform;
 
@@ -76,6 +79,45 @@ static DiagnosedSilenceableFailure convertMixedValuesToInt(
   return DiagnosedSilenceableFailure::success();
 }
 
+/// Find producer operation of type T for the given value.
+/// It's assumed that producer ops are chained through their first operand.
+/// Producer chain is traced through loop block arguments (init values).
+template <typename T>
+static std::optional<T> findProducerOfType(Value val) {
+  Value currentValue = val;
+  if (!currentValue.getDefiningOp()) {
+    // Value may be a block argument initialized outside a loop.
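    // For example, mirroring the transform-ops.mlir test added below:
    //   %1 = xegpu.load_nd %0[%c0, %c0] : !xegpu.tensor_desc<256x256xf16> -> vector<256x256xf16>
    //   %2 = scf.for ... iter_args(%arg4 = %1) -> (vector<256x256xf16>) { ... }
    // Tracing %arg4 hops to the matching init value %1, then keeps walking
    // first operands until an op of type T (or nothing) is found.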
+    if (val.getNumUses() == 0) {
+      LDBG() << "Failed to find producer op, value has no uses.";
+      return std::nullopt;
+    }
+    auto userOp = val.getUsers().begin();
+    auto parentLoop = userOp->getParentOfType<LoopLikeOpInterface>();
+    if (!parentLoop) {
+      LDBG() << "Failed to find producer op, not in a loop.";
+      return std::nullopt;
+    }
+    int64_t iterArgIdx;
+    if (auto iterArg = llvm::dyn_cast<BlockArgument>(currentValue)) {
+      auto numInductionVars = parentLoop.getLoopInductionVars()->size();
+      iterArgIdx = iterArg.getArgNumber() - numInductionVars;
+      currentValue = parentLoop.getInits()[iterArgIdx];
+    } else {
+      LDBG() << "Failed to find producer op, value not in init values.";
+      return std::nullopt;
+    }
+  }
+  Operation *producerOp = currentValue.getDefiningOp();
+
+  if (auto matchingOp = dyn_cast<T>(producerOp))
+    return matchingOp;
+
+  if (producerOp->getNumOperands() == 0)
+    return std::nullopt;
+
+  return findProducerOfType<T>(producerOp->getOperand(0));
+}
+
 /// Create a layout attribute from the given parameters.
 static xegpu::LayoutAttr
 createLayoutAttr(MLIRContext *ctx, ArrayRef<int64_t> sgLayout,
@@ -90,6 +132,36 @@ createLayoutAttr(MLIRContext *ctx, ArrayRef<int64_t> sgLayout,
                                /*order=*/nullptr);
 }
 
+/// Generate `xegpu::LayoutAttr` from op mixed layout values.
+DiagnosedSilenceableFailure
+getLayoutAttrFromOperands(MLIRContext *ctx, transform::TransformState &state,
+                          TransformOpInterface transformOp,
+                          ArrayRef<::mlir::OpFoldResult> mixedSgLayout,
+                          ArrayRef<::mlir::OpFoldResult> mixedSgData,
+                          ArrayRef<::mlir::OpFoldResult> mixedInstData,
+                          xegpu::LayoutAttr &layoutAttr) {
+  SmallVector<int64_t> sgLayout, sgData, instData;
+  auto status =
+      convertMixedValuesToInt(state, transformOp, sgLayout, mixedSgLayout);
+  if (!status.succeeded())
+    return status;
+
+  status = convertMixedValuesToInt(state, transformOp, sgData, mixedSgData);
+  if (!status.succeeded())
+    return status;
+
+  status = convertMixedValuesToInt(state, transformOp, instData, mixedInstData);
+  if (!status.succeeded())
+    return status;
+  auto maybeInstData = instData.empty()
+                           ? std::nullopt
+                           : std::optional<SmallVector<int64_t>>(instData);
+
+  layoutAttr = createLayoutAttr(ctx, sgLayout, sgData, maybeInstData);
+
+  return DiagnosedSilenceableFailure::success();
+}
+
 /// Replace xegpu.create_nd_desc op with a new one with the given layout.
static xegpu::CreateNdDescOp setDescLayout(transform::TransformRewriter &rewriter, @@ -111,6 +183,29 @@ setDescLayout(transform::TransformRewriter &rewriter, return newDescOp; } +DiagnosedSilenceableFailure +transform::GetDescOp::apply(transform::TransformRewriter &rewriter, + transform::TransformResults &results, + transform::TransformState &state) { + auto targetValues = state.getPayloadValues(getTarget()); + if (!llvm::hasSingleElement(targetValues)) { + return emitDefiniteFailure() + << "requires exactly one target value handle (got " + << llvm::range_size(targetValues) << ")"; + } + + auto maybeDescOp = + findProducerOfType(*targetValues.begin()); + if (!maybeDescOp) { + return emitSilenceableFailure(getLoc()) + << "Could not find a matching descriptor op when walking the " + "producer chain of the first operand."; + } + + results.set(llvm::cast(getResult()), {*maybeDescOp}); + return DiagnosedSilenceableFailure::success(); +} + void transform::SetDescLayoutOp::build(OpBuilder &builder, OperationState &result, Value target, ArrayRef mixedSgLayout, @@ -142,26 +237,13 @@ transform::SetDescLayoutOp::apply(transform::TransformRewriter &rewriter, } Operation *target = *targetOps.begin(); - SmallVector sgLayout; - DiagnosedSilenceableFailure status = - convertMixedValuesToInt(state, (*this), sgLayout, getMixedSgLayout()); - if (!status.succeeded()) - return status; - - SmallVector sgData; - status = convertMixedValuesToInt(state, (*this), sgData, getMixedSgData()); + xegpu::LayoutAttr layoutAttr = nullptr; + auto status = getLayoutAttrFromOperands(getContext(), state, (*this), + getMixedSgLayout(), getMixedSgData(), + getMixedInstData(), layoutAttr); if (!status.succeeded()) return status; - SmallVector instData; - status = - convertMixedValuesToInt(state, (*this), instData, getMixedInstData()); - if (!status.succeeded()) - return status; - auto maybeInstData = instData.empty() - ? std::nullopt - : std::optional>(instData); - // For now only create_nd_desc op is supported. auto descOp = dyn_cast(target); if (!descOp) { @@ -173,8 +255,6 @@ transform::SetDescLayoutOp::apply(transform::TransformRewriter &rewriter, } // Set layout attr in desc op's return type. Replaces old desc op. - auto layoutAttr = - createLayoutAttr(rewriter.getContext(), sgLayout, sgData, maybeInstData); auto newdescOp = setDescLayout(rewriter, descOp, layoutAttr); // Map result handles. 
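// Usage sketch for the new get_desc_op handle getter (schematic transform IR
// mirroring the tests added below; %root stands for the payload root handle):
//   %0 = transform.structured.match ops{["xegpu.dpas"]} in %root : (!transform.any_op) -> !transform.any_op
//   %1 = transform.get_operand %0[0] : (!transform.any_op) -> !transform.any_value
//   %2 = transform.xegpu.get_desc_op %1 : (!transform.any_value) -> !transform.any_op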
@@ -193,6 +273,74 @@ void transform::SetDescLayoutOp::getEffects( modifiesPayload(effects); } +void transform::SetOpLayoutAttrOp::build( + OpBuilder &builder, OperationState &ostate, Value target, int64_t index, + ArrayRef mixedSgLayout, ArrayRef mixedSgData, + ArrayRef mixedInstData, bool result) { + SmallVector staticSgLayout, staticSgData, staticInstData; + SmallVector dynamicSgLayout, dynamicSgData, dynamicInstData; + dispatchIndexOpFoldResults(mixedSgLayout, dynamicSgLayout, staticSgLayout); + dispatchIndexOpFoldResults(mixedSgData, dynamicSgData, staticSgData); + dispatchIndexOpFoldResults(mixedInstData, dynamicInstData, staticInstData); + build(builder, ostate, target.getType(), + /*target=*/target, + /*index=*/index, + /*sg_layout=*/dynamicSgLayout, + /*sg_data=*/dynamicSgData, + /*inst_data=*/dynamicInstData, + /*static_sg_layout=*/staticSgLayout, + /*static_sg_data=*/staticSgData, + /*static_inst_data=*/staticInstData, + /*result=*/result); +} + +DiagnosedSilenceableFailure +transform::SetOpLayoutAttrOp::apply(transform::TransformRewriter &rewriter, + transform::TransformResults &results, + transform::TransformState &state) { + auto targetOps = state.getPayloadOps(getTarget()); + if (!llvm::hasSingleElement(targetOps)) { + return emitDefiniteFailure() << "Requires exactly one targetOp handle (got " + << llvm::range_size(targetOps) << ")"; + } + Operation *target = *targetOps.begin(); + + bool resultTarget = getResult(); + + int64_t index = getIndex(); + if (resultTarget && index >= target->getNumResults()) { + return emitSilenceableFailure(getLoc()) + << "Index exceeds the number of op results"; + } + if (!resultTarget && index >= target->getNumOperands()) { + return emitSilenceableFailure(getLoc()) + << "Index exceeds the number of op operands"; + } + + xegpu::LayoutAttr layoutAttr = nullptr; + auto status = getLayoutAttrFromOperands(getContext(), state, (*this), + getMixedSgLayout(), getMixedSgData(), + getMixedInstData(), layoutAttr); + if (!status.succeeded()) + return status; + + // Set layout attribute for the op result or operand + if (resultTarget) + xegpu::setDistributeLayoutAttr(target->getResult(index), layoutAttr); + else + xegpu::setDistributeLayoutAttr(target->getOpOperand(index), layoutAttr); + return DiagnosedSilenceableFailure::success(); +} + +void transform::SetOpLayoutAttrOp::getEffects( + ::llvm::SmallVectorImpl &effects) { + onlyReadsHandle(getTargetMutable(), effects); + onlyReadsHandle(getSgLayoutMutable(), effects); + onlyReadsHandle(getSgDataMutable(), effects); + onlyReadsHandle(getInstDataMutable(), effects); + modifiesPayload(effects); +} + namespace { class XeGPUTransformDialectExtension : public transform::TransformDialectExtension< diff --git a/mlir/lib/Dialect/XeGPU/Transforms/XeGPUPropagateLayout.cpp b/mlir/lib/Dialect/XeGPU/Transforms/XeGPUPropagateLayout.cpp index 4e1a539771d2f..b3a780abd3f12 100644 --- a/mlir/lib/Dialect/XeGPU/Transforms/XeGPUPropagateLayout.cpp +++ b/mlir/lib/Dialect/XeGPU/Transforms/XeGPUPropagateLayout.cpp @@ -53,6 +53,8 @@ using namespace mlir::dataflow; namespace { +enum class LayoutKind { Lane, InstData }; + //===----------------------------------------------------------------------===// // LayoutInfo //===----------------------------------------------------------------------===// @@ -166,7 +168,8 @@ LayoutInfo LayoutInfo::join(const LayoutInfo &lhs, const LayoutInfo &rhs) { llvm_unreachable("Join should not be triggered by layout propagation."); } -/// Construct a new layout with the transposed lane layout and lane 
data. +/// Construct a new layout with the transposed inst_data or lane_layout, +/// lane_data. LayoutInfo LayoutInfo::transpose(ArrayRef permutation) const { if (!isAssigned()) return {}; @@ -186,12 +189,20 @@ LayoutInfo LayoutInfo::transpose(ArrayRef permutation) const { SmallVector laneData; SmallVector instData; for (int64_t idx : permutation) { - laneLayout.push_back(static_cast(getLaneLayout()[idx])); - laneData.push_back(static_cast(getLaneData()[idx])); - instData.push_back(static_cast(getInstData()[idx])); + if (getLaneLayout().size()) { + laneLayout.push_back(static_cast(getLaneLayout()[idx])); + laneData.push_back(static_cast(getLaneData()[idx])); + } + if (getInstData().size()) + instData.push_back(static_cast(getInstData()[idx])); } - return LayoutInfo(xegpu::LayoutAttr::get(storage.getContext(), instData, - laneLayout, laneData)); + xegpu::LayoutAttr layoutAttr; + if (getLaneLayout().size()) + layoutAttr = + xegpu::LayoutAttr::get(storage.getContext(), laneLayout, laneData); + if (getInstData().size()) + layoutAttr = xegpu::LayoutAttr::get(storage.getContext(), instData); + return LayoutInfo(layoutAttr); } //===----------------------------------------------------------------------===// @@ -213,15 +224,14 @@ struct LayoutInfoLattice : public Lattice { /// For 2D vector, lane_layout is [1, subgroupSize] and lane_data is [1, 1]. static LayoutInfo getDefaultSIMTLayoutInfo(mlir::MLIRContext *ctx, unsigned rank, - const xegpu::uArch::uArch *uArch, - ArrayRef instData) { + const xegpu::uArch::uArch *uArch) { assert((rank == 1 || rank == 2) && "Expected 1D or 2D vector."); if (rank == 1) { return LayoutInfo( - xegpu::LayoutAttr::get(ctx, instData, {uArch->getSubgroupSize()}, {1})); + xegpu::LayoutAttr::get(ctx, {uArch->getSubgroupSize()}, {1})); } - return LayoutInfo(xegpu::LayoutAttr::get( - ctx, instData, {1, uArch->getSubgroupSize()}, {1, 1})); + return LayoutInfo( + xegpu::LayoutAttr::get(ctx, {1, uArch->getSubgroupSize()}, {1, 1})); } static LayoutInfo getDefaultSIMTLayoutInfo(mlir::MLIRContext *ctx, @@ -236,7 +246,6 @@ static LayoutInfo getDefaultSIMTLayoutInfo(mlir::MLIRContext *ctx, /// Helper to get the default layout for a vector type. static LayoutInfo getDefaultSIMTLayoutInfo(VectorType vectorTy, const xegpu::uArch::uArch *uArch, - ArrayRef instData, unsigned packingSize, bool isScattered = false) { // Expecting a 1D or 2D vector. @@ -247,16 +256,16 @@ static LayoutInfo getDefaultSIMTLayoutInfo(VectorType vectorTy, "Expected int or float element type."); // If the rank is 1, then return default layout for 1D vector. if (vectorTy.getRank() == 1) - return getDefaultSIMTLayoutInfo(vectorTy.getContext(), 1, uArch, instData); + return getDefaultSIMTLayoutInfo(vectorTy.getContext(), 1, uArch); // Packing factor is determined by the element type bitwidth. unsigned bitwidth = vectorTy.getElementType().getIntOrFloatBitWidth(); int packingFactor = bitwidth < packingSize ? packingSize / bitwidth : 1; if (isScattered) { - return LayoutInfo(xegpu::LayoutAttr::get(vectorTy.getContext(), instData, + return LayoutInfo(xegpu::LayoutAttr::get(vectorTy.getContext(), {uArch->getSubgroupSize(), 1}, {1, packingFactor})); } - return LayoutInfo(xegpu::LayoutAttr::get(vectorTy.getContext(), instData, + return LayoutInfo(xegpu::LayoutAttr::get(vectorTy.getContext(), {1, uArch->getSubgroupSize()}, {1, packingFactor})); } @@ -264,7 +273,6 @@ static LayoutInfo getDefaultSIMTLayoutInfo(VectorType vectorTy, /// Helper to get the default layout for a vector type. 
static LayoutInfo getDefaultSIMTLayoutInfo(xegpu::TensorDescType tdescTy, const xegpu::uArch::uArch *uArch, - ArrayRef instData, unsigned packingSize, bool isScattered = false) { // Expecting a 1D or 2D vector. @@ -275,18 +283,18 @@ static LayoutInfo getDefaultSIMTLayoutInfo(xegpu::TensorDescType tdescTy, "Expected int or float element type."); // If the rank is 1, then return default layout for 1D vector. if (tdescTy.getRank() == 1) - return getDefaultSIMTLayoutInfo(tdescTy.getContext(), 1, uArch, instData); + return getDefaultSIMTLayoutInfo(tdescTy.getContext(), 1, uArch); // Packing factor is determined by the element type bitwidth. unsigned bitwidth = tdescTy.getElementType().getIntOrFloatBitWidth(); int subgroupSize = uArch->getSubgroupSize(); int packingFactor = bitwidth < packingSize ? packingSize / bitwidth : 1; if (isScattered) { return LayoutInfo(xegpu::LayoutAttr::get( - tdescTy.getContext(), instData, {subgroupSize, 1}, {1, packingFactor})); + tdescTy.getContext(), {subgroupSize, 1}, {1, packingFactor})); } return LayoutInfo(xegpu::LayoutAttr::get( - tdescTy.getContext(), instData, {1, subgroupSize}, {1, packingFactor})); + tdescTy.getContext(), {1, subgroupSize}, {1, packingFactor})); } /// Helper Function to get the expected layouts for DPAS operands. `lane_data` @@ -298,7 +306,7 @@ static LayoutInfo getDefaultSIMTLayoutInfo(xegpu::TensorDescType tdescTy, static LayoutInfo getSIMTLayoutInfoForDPASOperand(VectorType vectorTy, unsigned operandNum, const xegpu::uArch::uArch *uArch, - ArrayRef instData, unsigned packingSize) { + unsigned packingSize) { Type elementTy = vectorTy.getElementType(); assert(elementTy.isIntOrFloat() && "Expected int or float type in DPAS operands"); @@ -310,10 +318,10 @@ getSIMTLayoutInfoForDPASOperand(VectorType vectorTy, unsigned operandNum, {static_cast(packingSize / elementTy.getIntOrFloatBitWidth()), 1}); return LayoutInfo( - xegpu::LayoutAttr::get(vectorTy.getContext(), instData, layout, data)); + xegpu::LayoutAttr::get(vectorTy.getContext(), layout, data)); } // Otherwise, return the default layout for the vector type. 
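  // (after this change the two layout kinds no longer mix in one attribute:
  // layout-kind=lane yields e.g. #xegpu.layout<lane_layout = [1, 16], lane_data = [1, 1]>,
  // while layout-kind=inst yields e.g. #xegpu.layout<inst_data = [8, 16]>)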
- return getDefaultSIMTLayoutInfo(vectorTy, uArch, instData, packingSize); + return getDefaultSIMTLayoutInfo(vectorTy, uArch, packingSize); } //===----------------------------------------------------------------------===// @@ -328,6 +336,7 @@ getSIMTLayoutInfoForDPASOperand(VectorType vectorTy, unsigned operandNum, class LayoutInfoPropagation : public SparseBackwardDataFlowAnalysis { private: + LayoutKind layoutKind; void visitDpasOp(xegpu::DpasOp dpas, ArrayRef operands, ArrayRef results); @@ -380,8 +389,10 @@ class LayoutInfoPropagation public: LayoutInfoPropagation(DataFlowSolver &solver, - SymbolTableCollection &symbolTable) - : SparseBackwardDataFlowAnalysis(solver, symbolTable) {} + SymbolTableCollection &symbolTable, + LayoutKind layoutKind) + : SparseBackwardDataFlowAnalysis(solver, symbolTable), + layoutKind(layoutKind) {} using SparseBackwardDataFlowAnalysis::SparseBackwardDataFlowAnalysis; LogicalResult @@ -499,8 +510,14 @@ void LayoutInfoPropagation::visitPrefetchNdOp( "No suitable instruction multiple found for the given shape."); instData = {instHeight, instWidth}; } - auto prefetchLayout = getDefaultSIMTLayoutInfo( - tdescTy, uArch, instData, uArchInstruction->getPackedFormatBitSize()); + LayoutInfo prefetchLayout; + if (layoutKind == LayoutKind::InstData) + prefetchLayout = + LayoutInfo(xegpu::LayoutAttr::get(tdescTy.getContext(), instData)); + else + prefetchLayout = getDefaultSIMTLayoutInfo( + tdescTy, uArch, uArchInstruction->getPackedFormatBitSize()); + // Propagate the layout to the source tensor descriptor. propagateIfChanged(operands[0], operands[0]->meet(prefetchLayout)); } @@ -627,14 +644,24 @@ void LayoutInfoPropagation::visitDpasOp( SmallVector instDataA = {maxALen, subgroupSize}; SmallVector instDataB = {subgroupSize, maxBLen}; - propagateIfChanged(operands[0], - operands[0]->meet(getSIMTLayoutInfoForDPASOperand( - aTy, 0, uArch, instDataA, - uArchInstruction->getPackedFormatBitSizeA()))); - propagateIfChanged(operands[1], - operands[1]->meet(getSIMTLayoutInfoForDPASOperand( - bTy, 1, uArch, instDataB, - uArchInstruction->getPackedFormatBitSizeB()))); + LayoutInfo dpasALayout; + LayoutInfo dpasBLayout; + LayoutInfo dpasCLayout; + + if (layoutKind == LayoutKind::InstData) { + dpasALayout = + LayoutInfo(xegpu::LayoutAttr::get(dpas.getContext(), instDataA)); + dpasBLayout = + LayoutInfo(xegpu::LayoutAttr::get(dpas.getContext(), instDataB)); + } else { + dpasALayout = getSIMTLayoutInfoForDPASOperand( + aTy, 0, uArch, uArchInstruction->getPackedFormatBitSizeA()); + dpasBLayout = getSIMTLayoutInfoForDPASOperand( + bTy, 1, uArch, uArchInstruction->getPackedFormatBitSizeB()); + } + + propagateIfChanged(operands[0], operands[0]->meet(dpasALayout)); + propagateIfChanged(operands[1], operands[1]->meet(dpasBLayout)); if (operands.size() > 2) { VectorType cTy = dpas.getAccType(); const unsigned dataCLen = bTy.getShape().back(); @@ -645,10 +672,15 @@ void LayoutInfoPropagation::visitDpasOp( dpas.emitWarning( "No suitable instruction multiple found for the given shape."); SmallVector instDataC = {maxALen, maxCLen}; - propagateIfChanged(operands[2], - operands[2]->meet(getSIMTLayoutInfoForDPASOperand( - cTy, 2, uArch, instDataC, - uArchInstruction->getPackedFormatBitSizeB()))); + + if (layoutKind == LayoutKind::InstData) + dpasCLayout = + LayoutInfo(xegpu::LayoutAttr::get(dpas.getContext(), instDataC)); + else + dpasCLayout = getSIMTLayoutInfoForDPASOperand( + cTy, 2, uArch, uArchInstruction->getPackedFormatBitSizeB()); + + propagateIfChanged(operands[2], 
operands[2]->meet(dpasCLayout)); } } @@ -685,9 +717,15 @@ void LayoutInfoPropagation::visitStoreNdOp( "No suitable instruction multiple found for the given shape."); instData = {instHeight, instWidth}; } - LayoutInfo storeLayout = - getDefaultSIMTLayoutInfo(store.getValueType(), uArch, instData, - uArchInstruction->getPackedFormatBitSize()); + + LayoutInfo storeLayout; + if (layoutKind == LayoutKind::InstData) + storeLayout = + LayoutInfo(xegpu::LayoutAttr::get(dataTy.getContext(), instData)); + else + storeLayout = + getDefaultSIMTLayoutInfo(store.getValueType(), uArch, + uArchInstruction->getPackedFormatBitSize()); // Both operands should have the same layout for (LayoutInfoLattice *operand : operands) propagateIfChanged(operand, operand->meet(storeLayout)); @@ -818,9 +856,13 @@ void LayoutInfoPropagation::visitLoadGatherOp( if (srcTdescTy.getChunkSizeAsInt() > 1) instData.push_back(chunkSize); } - LayoutInfo layout = getDefaultSIMTLayoutInfo( - payloadTy, uArch, instData, uArch->getGeneralPackedFormatBitSize(), - /*scattered*/ true); + LayoutInfo layout; + if (layoutKind == LayoutKind::InstData) + layout = LayoutInfo(xegpu::LayoutAttr::get(load.getContext(), instData)); + else + layout = getDefaultSIMTLayoutInfo(payloadTy, uArch, + uArch->getGeneralPackedFormatBitSize(), + /*scattered*/ true); // Mask operand should have 1D default layout. LayoutInfo maskLayout = @@ -864,33 +906,36 @@ void LayoutInfoPropagation::visitStoreScatterOp( storeScatter.emitWarning("Not propagating, non-vector payload supplied."); return; } + LayoutInfo payloadLayout; auto uArch = getUArch(getChipStr(storeScatter).value_or("")); const int subgroupSize = uArch->getSubgroupSize(); - auto payloadShape = payloadTy.getShape(); - if (payloadShape.size() > 1) - assert( - payloadShape[0] == subgroupSize && - "Expected the first dimension of 2D tensor descriptor to be equal to " - "subgroup size."); - - SmallVector instData{subgroupSize}; - if (auto chunkSize = storeScatter.getChunkSize().value_or(0); chunkSize > 1) - instData.push_back(chunkSize); - else if (auto dstTdescTy = - dyn_cast(storeScatter.getDestType())) { - if (dstTdescTy.getChunkSizeAsInt() > 1) - instData.push_back(chunkSize); - } - - LayoutInfo payloadLayout; - if (auto layout = storeScatter.getLayoutAttr()) { payloadLayout = LayoutInfo(layout); } else { - payloadLayout = getDefaultSIMTLayoutInfo( - payloadTy, uArch, instData, uArch->getGeneralPackedFormatBitSize(), - /*scattered=*/true); + if (layoutKind == LayoutKind::InstData) { + SmallVector instData{subgroupSize}; + if (auto chunkSize = storeScatter.getChunkSize().value_or(0); + chunkSize > 1) + instData.push_back(chunkSize); + else if (auto dstTdescTy = dyn_cast( + storeScatter.getDestType())) { + if (dstTdescTy.getChunkSizeAsInt() > 1) + instData.push_back(chunkSize); + } + payloadLayout = LayoutInfo( + xegpu::LayoutAttr::get(storeScatter.getContext(), instData)); + } else { + auto payloadShape = payloadTy.getShape(); + if (payloadShape.size() > 1) + assert(payloadShape[0] == subgroupSize && + "Expected the first dimension of 2D tensor descriptor to be " + "equal to " + "subgroup size."); + payloadLayout = getDefaultSIMTLayoutInfo( + payloadTy, uArch, uArch->getGeneralPackedFormatBitSize(), + /*scattered=*/true); + } } LayoutInfo maskLayout = @@ -916,10 +961,10 @@ class RunLayoutInfoPropagation { public: MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(RunLayoutInfoPropagation) - RunLayoutInfoPropagation(Operation *op) : target(op) { + RunLayoutInfoPropagation(Operation *op, LayoutKind layoutKind) : 
target(op) { SymbolTableCollection symbolTable; loadBaselineAnalyses(solver); - solver.load(symbolTable); + solver.load(symbolTable, layoutKind); (void)solver.initializeAndRun(op); } @@ -1159,7 +1204,18 @@ struct XeGPUPropagateLayoutPass final } // namespace void XeGPUPropagateLayoutPass::runOnOperation() { - auto &analysis = getAnalysis(); + LayoutKind layoutKind; + if (this->layoutKind == "lane") { + layoutKind = LayoutKind::Lane; + } else if (this->layoutKind == "inst") { + layoutKind = LayoutKind::InstData; + } else { + getOperation()->emitError("Unsupported layout kind option: " + + this->layoutKind); + signalPassFailure(); + return; + } + RunLayoutInfoPropagation analysis(getOperation(), layoutKind); // Print the analysis result and exit. (for debugging purposes) if (printOnly) { auto &os = llvm::outs(); @@ -1173,8 +1229,6 @@ void XeGPUPropagateLayoutPass::runOnOperation() { return {}; xegpu::DistributeLayoutAttr layoutAttr = cast(layout.get()); - if (this->layoutKind == "lane") - layoutAttr = layoutAttr.dropInstData(); if (layout.isSliceLayout()) return cast(layoutAttr); return cast(layoutAttr); diff --git a/mlir/lib/Interfaces/InferTypeOpInterface.cpp b/mlir/lib/Interfaces/InferTypeOpInterface.cpp index 9f4f672fb9f4d..c31e0ae7470e2 100644 --- a/mlir/lib/Interfaces/InferTypeOpInterface.cpp +++ b/mlir/lib/Interfaces/InferTypeOpInterface.cpp @@ -58,6 +58,22 @@ mlir::reifyResultShapes(OpBuilder &b, Operation *op, return status; } +FailureOr> +mlir::reifyShapeOfResult(OpBuilder &b, Operation *op, int resultIndex) { + auto reifiableOp = dyn_cast(op); + if (!reifiableOp) + return failure(); + return reifiableOp.reifyShapeOfResult(b, resultIndex); +} + +FailureOr mlir::reifyDimOfResult(OpBuilder &b, Operation *op, + int resultIndex, int dim) { + auto reifiableOp = dyn_cast(op); + if (!reifiableOp) + return failure(); + return reifiableOp.reifyDimOfResult(b, resultIndex, dim); +} + bool ShapeAdaptor::hasRank() const { if (val.isNull()) return false; diff --git a/mlir/lib/Tools/mlir-opt/MlirOptMain.cpp b/mlir/lib/Tools/mlir-opt/MlirOptMain.cpp index 9ef405dad5a70..018a188d09109 100644 --- a/mlir/lib/Tools/mlir-opt/MlirOptMain.cpp +++ b/mlir/lib/Tools/mlir-opt/MlirOptMain.cpp @@ -681,17 +681,8 @@ processBuffer(raw_ostream &os, std::unique_ptr ownedBuffer, return success(); } -std::pair -mlir::registerAndParseCLIOptions(int argc, char **argv, - llvm::StringRef toolName, - DialectRegistry ®istry) { - static cl::opt inputFilename( - cl::Positional, cl::desc(""), cl::init("-")); - - static cl::opt outputFilename("o", cl::desc("Output filename"), - cl::value_desc("filename"), - cl::init("-")); - // Register any command line options. +std::string mlir::registerCLIOptions(llvm::StringRef toolName, + DialectRegistry ®istry) { MlirOptMainConfig::registerCLOptions(registry); registerAsmPrinterCLOptions(); registerMLIRContextCLOptions(); @@ -706,11 +697,29 @@ mlir::registerAndParseCLIOptions(int argc, char **argv, interleaveComma(registry.getDialectNames(), os, [&](auto name) { os << name; }); } - // Parse pass names in main to ensure static initialization completed. 
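// Splitting registration from parsing lets embedders register tool options
// once and parse argv separately: callers invoke registerCLIOptions(toolName,
// registry) to obtain the help header, then parseCLIOptions(argc, argv,
// helpHeader) for the input/output filenames; registerAndParseCLIOptions
// keeps the old one-shot behavior on top of the two.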
+ return helpHeader; +} + +std::pair +mlir::parseCLIOptions(int argc, char **argv, llvm::StringRef helpHeader) { + static cl::opt inputFilename( + cl::Positional, cl::desc(""), cl::init("-")); + + static cl::opt outputFilename("o", cl::desc("Output filename"), + cl::value_desc("filename"), + cl::init("-")); cl::ParseCommandLineOptions(argc, argv, helpHeader); return std::make_pair(inputFilename.getValue(), outputFilename.getValue()); } +std::pair +mlir::registerAndParseCLIOptions(int argc, char **argv, + llvm::StringRef toolName, + DialectRegistry ®istry) { + auto helpHeader = registerCLIOptions(toolName, registry); + return parseCLIOptions(argc, argv, helpHeader); +} + static LogicalResult printRegisteredDialects(DialectRegistry ®istry) { llvm::outs() << "Available Dialects: "; interleave(registry.getDialectNames(), llvm::outs(), ","); diff --git a/mlir/lib/Transforms/Utils/DialectConversion.cpp b/mlir/lib/Transforms/Utils/DialectConversion.cpp index f8c38fadbd229..9945a711d5c74 100644 --- a/mlir/lib/Transforms/Utils/DialectConversion.cpp +++ b/mlir/lib/Transforms/Utils/DialectConversion.cpp @@ -2856,17 +2856,19 @@ LogicalResult OperationLegalizer::legalizePatternResult( assert(impl.pendingRootUpdates.empty() && "dangling root updates"); #if MLIR_ENABLE_EXPENSIVE_PATTERN_API_CHECKS - // Check that the root was either replaced or updated in place. - auto newRewrites = llvm::drop_begin(impl.rewrites, curState.numRewrites); - auto replacedRoot = [&] { - return hasRewrite(newRewrites, op); - }; - auto updatedRootInPlace = [&] { - return hasRewrite(newRewrites, op); - }; - if (!replacedRoot() && !updatedRootInPlace()) - llvm::report_fatal_error( - "expected pattern to replace the root operation or modify it in place"); + if (impl.config.allowPatternRollback) { + // Check that the root was either replaced or updated in place. + auto newRewrites = llvm::drop_begin(impl.rewrites, curState.numRewrites); + auto replacedRoot = [&] { + return hasRewrite(newRewrites, op); + }; + auto updatedRootInPlace = [&] { + return hasRewrite(newRewrites, op); + }; + if (!replacedRoot() && !updatedRootInPlace()) + llvm::report_fatal_error("expected pattern to replace the root operation " + "or modify it in place"); + } #endif // MLIR_ENABLE_EXPENSIVE_PATTERN_API_CHECKS // Legalize each of the actions registered during application. 
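// Note on the allowPatternRollback guard above: when rollback is disabled the
// driver does not record the per-pattern rewrite bookkeeping this expensive
// check inspects, so running it unconditionally would trip a false fatal
// error; the invariant is only checkable while allowPatternRollback is set.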
diff --git a/mlir/python/mlir/dialects/transform/xegpu.py b/mlir/python/mlir/dialects/transform/xegpu.py index 2918bf592880a..ce8015d8f557b 100644 --- a/mlir/python/mlir/dialects/transform/xegpu.py +++ b/mlir/python/mlir/dialects/transform/xegpu.py @@ -7,6 +7,7 @@ try: from ...ir import * + from ...dialects import transform from .._ods_common import _cext as _ods_cext from .._ods_common import ( MixedValues, @@ -20,6 +21,26 @@ from typing import Union, Optional +@_ods_cext.register_operation(_Dialect, replace=True) +class GetDescOp(GetDescOp): + """Specialization for GetDescOp class.""" + + def __init__( + self, + target: Value, + *, + loc=None, + ip=None, + ): + desc_type = transform.AnyOpType.get() + super().__init__( + desc_type, + target, + loc=loc, + ip=ip, + ) + + @_ods_cext.register_operation(_Dialect, replace=True) class SetDescLayoutOp(SetDescLayoutOp): """Specialization for SetDescLayoutOp class.""" @@ -64,3 +85,50 @@ def __init__( loc=loc, ip=ip, ) + + +@_ods_cext.register_operation(_Dialect, replace=True) +class SetOpLayoutAttrOp(SetOpLayoutAttrOp): + """Specialization for SetOpLayoutAttrOp class.""" + + def __init__( + self, + target: Union[Operation, Value], + sg_layout: MixedValues, + sg_data: MixedValues, + *, + inst_data: Optional[MixedValues] = None, + index: Optional[Union[int, Attribute]] = None, + result: Optional[Union[bool, Attribute]] = None, + loc=None, + ip=None, + ): + inst_data = [] if inst_data is None else inst_data + ( + dynamic_sg_layout, + static_sg_layout, + _, + ) = _dispatch_dynamic_index_list(sg_layout) + ( + dynamic_sg_data, + static_sg_data, + _, + ) = _dispatch_dynamic_index_list(sg_data) + ( + dynamic_inst_data, + static_inst_data, + _, + ) = _dispatch_dynamic_index_list(inst_data) + super().__init__( + _get_op_result_or_value(target), + dynamic_sg_layout, + dynamic_sg_data, + dynamic_inst_data, + static_sg_layout=static_sg_layout, + static_sg_data=static_sg_data, + static_inst_data=static_inst_data, + index=index, + result=result, + loc=loc, + ip=ip, + ) diff --git a/mlir/test/Conversion/XeGPUToXeVM/create_nd_tdesc.mlir b/mlir/test/Conversion/XeGPUToXeVM/create_nd_tdesc.mlir index 09ef76c9d1740..109312218afae 100644 --- a/mlir/test/Conversion/XeGPUToXeVM/create_nd_tdesc.mlir +++ b/mlir/test/Conversion/XeGPUToXeVM/create_nd_tdesc.mlir @@ -29,13 +29,13 @@ gpu.module @create_nd_tdesc { // CHECK: %[[CST_1:.*]] = arith.constant dense<0> : vector<8xi32> // CHECK: %[[INTPTR:.*]] = memref.extract_aligned_pointer_as_index %[[MEMSPACECAST]] : memref<16x32xf32> -> index + // CHECK: %[[BASE_ADDR2:.*]] = arith.index_castui %[[INTPTR]] : index to i64 // CHECK: %[[OFFSET_W2:.*]] = arith.constant 0 : i32 // CHECK: %[[OFFSET_H2:.*]] = arith.constant 0 : i32 // CHECK: %[[C32_I64:.*]] = arith.constant 32 : i64 // CHECK: %[[SHAPE_W2:.*]] = arith.trunci %[[C32_I64]] : i64 to i32 // CHECK: %[[C16_I64:.*]] = arith.constant 16 : i64 // CHECK: %[[SHAPE_H2:.*]] = arith.trunci %[[C16_I64]] : i64 to i32 - // CHECK: %[[BASE_ADDR2:.*]] = arith.index_castui %[[INTPTR]] : index to i64 // CHECK: %[[VAR14:.*]] = vector.bitcast %[[CST_1]] : vector<8xi32> to vector<4xi64> // CHECK: %[[VAR15:.*]] = vector.insert %[[BASE_ADDR2]], %[[VAR14]] [0] : i64 into vector<4xi64> // CHECK: %[[VAR16:.*]] = vector.bitcast %[[VAR15]] : vector<4xi64> to vector<8xi32> @@ -53,11 +53,11 @@ gpu.module @create_nd_tdesc { %BLOCK_DMODEL = arith.constant 16 : index // CHECK: %[[CST_4:.*]] = arith.constant dense<0> : vector<8xi32> // CHECK: %[[INTPTR_5:.*]] = memref.extract_aligned_pointer_as_index %[[DYN]] : 
memref -> index + // CHECK: %[[VAR23:.*]] = arith.index_castui %[[INTPTR_5]] : index to i64 // CHECK: %[[C0_I32_6:.*]] = arith.constant 0 : i32 // CHECK: %[[C0_I32_7:.*]] = arith.constant 0 : i32 // CHECK: %[[VAR21:.*]] = arith.index_cast %[[C16]] : index to i32 // CHECK: %[[VAR22:.*]] = arith.index_cast %[[C64]] : index to i32 - // CHECK: %[[VAR23:.*]] = arith.index_castui %[[INTPTR_5]] : index to i64 // CHECK: %[[VAR24:.*]] = vector.bitcast %[[CST_4]] : vector<8xi32> to vector<4xi64> // CHECK: %[[VAR25:.*]] = vector.insert %[[VAR23]], %[[VAR24]] [0] : i64 into vector<4xi64> // CHECK: %[[VAR26:.*]] = vector.bitcast %[[VAR25]] : vector<4xi64> to vector<8xi32> diff --git a/mlir/test/Conversion/XeGPUToXeVM/loadstore_1d.mlir b/mlir/test/Conversion/XeGPUToXeVM/loadstore_1d.mlir new file mode 100644 index 0000000000000..7b4ad9ec2df03 --- /dev/null +++ b/mlir/test/Conversion/XeGPUToXeVM/loadstore_1d.mlir @@ -0,0 +1,36 @@ +// RUN: mlir-opt -convert-xegpu-to-xevm -canonicalize %s | FileCheck %s + +gpu.module @load_store_check { + // CHECK-LABEL: @load_store( + // CHECK-SAME: %[[SRC:.*]]: memref<512xf32, 1>, %[[DST:.*]]: memref<256xf32, 1> + gpu.func @load_store(%src: memref<512xf32, 1>, %dst: memref<256xf32, 1>) kernel { + // CHECK: %[[C512:.*]] = arith.constant 512 : i64 + // CHECK: %[[C384:.*]] = arith.constant 384 : i64 + + // CHECK: %[[SRCCE:.*]] = memref.memory_space_cast %[[SRC]] : memref<512xf32, 1> to memref<512xf32> + %srcce = memref.memory_space_cast %src : memref<512xf32, 1> to memref<512xf32> + // CHECK: %[[DSTTE:.*]] = memref.memory_space_cast %[[DST]] : memref<256xf32, 1> to memref<256xf32> + %dstte = memref.memory_space_cast %dst : memref<256xf32, 1> to memref<256xf32> + + // CHECK: %[[INTPTR:.*]] = memref.extract_aligned_pointer_as_index %[[SRCCE]] : memref<512xf32> -> index + // CHECK: %[[INTPTR_I64:.*]] = arith.index_castui %[[INTPTR]] : index to i64 + %src_tdesc = xegpu.create_nd_tdesc %srcce : memref<512xf32> -> !xegpu.tensor_desc<32xf32> + // CHECK: %[[ADDR:.*]] = arith.addi %[[INTPTR_I64]], %[[C384]] : i64 + // CHECK: %[[PTR:.*]] = llvm.inttoptr %[[ADDR]] : i64 to !llvm.ptr<1> + // CHECK: %[[LOAD:.*]] = xevm.blockload %[[PTR]] <{cache_control = #xevm.load_cache_control}> + // CHECK-SAME: : (!llvm.ptr<1>) -> vector<2xi32> + %loaded = xegpu.load_nd %src_tdesc[96] <{l1_hint = #xegpu.cache_hint, l2_hint = #xegpu.cache_hint}> + : !xegpu.tensor_desc<32xf32> -> vector<2xf32> + + // CHECK: %[[INTPTR1:.*]] = memref.extract_aligned_pointer_as_index %[[DSTTE]] : memref<256xf32> -> index + // CHECK: %[[INTPTR1_I64:.*]] = arith.index_castui %[[INTPTR1]] : index to i64 + %dst_tdesc = xegpu.create_nd_tdesc %dstte : memref<256xf32> -> !xegpu.tensor_desc<32xf32, #xegpu.block_tdesc_attr> + // CHECK: %[[ADDR1:.*]] = arith.addi %[[INTPTR1_I64]], %[[C512]] : i64 + // CHECK: %[[PTR1:.*]] = llvm.inttoptr %[[ADDR1]] : i64 to !llvm.ptr<1> + // CHECK: xevm.blockstore %[[PTR1]], %[[LOAD]] <{cache_control = #xevm.store_cache_control}> + // CHECK-SAME: : (!llvm.ptr<1>, vector<2xi32>) + xegpu.store_nd %loaded, %dst_tdesc[128] <{l1_hint = #xegpu.cache_hint, l2_hint = #xegpu.cache_hint}> + : vector<2xf32>, !xegpu.tensor_desc<32xf32, #xegpu.block_tdesc_attr> + gpu.return + } +} diff --git a/mlir/test/Conversion/XeGPUToXeVM/loadstore_nd.mlir b/mlir/test/Conversion/XeGPUToXeVM/loadstore_nd.mlir index 4c6bbf25b4728..95774ca67c4f2 100644 --- a/mlir/test/Conversion/XeGPUToXeVM/loadstore_nd.mlir +++ b/mlir/test/Conversion/XeGPUToXeVM/loadstore_nd.mlir @@ -16,6 +16,7 @@ gpu.module @load_store_check { %src_tdesc = 
xegpu.create_nd_tdesc %srcce : memref<8x16xf32> -> !xegpu.tensor_desc<8x16xf32> + //CHECK: %[[LD_SIZEOF_F32:.*]] = arith.constant 4 : i32 //CHECK: %[[LD_DESC_I64:.*]] = vector.bitcast %[[LD_DESC]] : vector<8xi32> to vector<4xi64> //CHECK: %[[LD_INTPTR:.*]] = vector.extract %[[LD_DESC_I64]][0] : i64 from vector<4xi64> //CHECK: %[[LD_BASE_W:.*]] = vector.extract %[[LD_DESC]][2] : i32 from vector<8xi32> @@ -25,7 +26,6 @@ gpu.module @load_store_check { //CHECK: %[[LD_TILE_H64:.*]] = arith.constant 0 : i64 //CHECK: %[[LD_TILE_H:.*]] = arith.trunci %[[LD_TILE_H64]] : i64 to i32 //CHECK: %[[LD_LLVMPTR:.*]] = llvm.inttoptr %[[LD_INTPTR]] : i64 to !llvm.ptr<1> - //CHECK: %[[LD_SIZEOF_F32:.*]] = arith.constant 4 : i32 //CHECK: %[[LD_BASE_ROW_IN_BYTES:.*]] = arith.muli %[[LD_BASE_W]], %[[LD_SIZEOF_F32]] : i32 //CHECK: %[[LD_LOADED_I32:.*]] = xevm.blockload2d %[[LD_LLVMPTR]], %[[LD_BASE_ROW_IN_BYTES]], //CHECK-SAME: %[[LD_BASE_H]], %[[LD_BASE_ROW_IN_BYTES]], %[[LD_TILE_W]], %[[LD_TILE_H]] @@ -52,6 +52,7 @@ gpu.module @load_store_check { // CHECK: %[[DESC:.*]] = vector.insert {{.*}}, %[[DESC_4]] [5] : i32 into vector<8xi32> %dst_tdesc = xegpu.create_nd_tdesc %dstte : memref<8x16xf32> -> !xegpu.tensor_desc<8x16xf32, #xegpu.block_tdesc_attr> + //CHECK: %[[SIZEOF_F32:.*]] = arith.constant 4 : i32 //CHECK: %[[DESC_I64:.*]] = vector.bitcast %[[DESC]] : vector<8xi32> to vector<4xi64> //CHECK: %[[INTPTR:.*]] = vector.extract %[[DESC_I64]][0] : i64 from vector<4xi64> //CHECK: %[[BASE_W:.*]] = vector.extract %[[DESC]][2] : i32 from vector<8xi32> @@ -61,7 +62,6 @@ gpu.module @load_store_check { //CHECK: %[[TILE_H64:.*]] = arith.constant 0 : i64 //CHECK: %[[TILE_H:.*]] = arith.trunci %[[TILE_H64]] : i64 to i32 //CHECK: %[[LLVMPTR:.*]] = llvm.inttoptr %[[INTPTR]] : i64 to !llvm.ptr<1> - //CHECK: %[[SIZEOF_F32:.*]] = arith.constant 4 : i32 //CHECK: %[[BASE_ROW_IN_BYTES:.*]] = arith.muli %[[BASE_W]], %[[SIZEOF_F32]] : i32 //CHECK: %[[FLAT_VALUE_I32:.*]] = vector.bitcast %[[LOADED_F32_MODIFIED]] : vector<8xf32> to vector<8xi32> //CHECK: xevm.blockstore2d %[[LLVMPTR]], %[[BASE_ROW_IN_BYTES]], %[[BASE_H]], %[[BASE_ROW_IN_BYTES]], diff --git a/mlir/test/Dialect/LLVMIR/rocdl.mlir b/mlir/test/Dialect/LLVMIR/rocdl.mlir index 5e857599b65ea..d50cc41684e3c 100644 --- a/mlir/test/Dialect/LLVMIR/rocdl.mlir +++ b/mlir/test/Dialect/LLVMIR/rocdl.mlir @@ -650,6 +650,39 @@ llvm.func @rocdl.ds.read.tr(%ptr : !llvm.ptr<3>) -> vector<4xf16> { llvm.return %r3 : vector<4xf16> } +llvm.func @rocdl.load.tr.ops(%gl_ptr : !llvm.ptr<1>, %ds_ptr : !llvm.ptr<3>) { + // CHECK-LABEL: @rocdl.load.tr.ops + // CHECK-SAME: (%[[GL_PTR:.+]]: !llvm.ptr<1>, %[[DS_OTR:.+]]: !llvm.ptr<3>) + // CHECK: rocdl.global.load.tr4.b64 %[[GL_PTR]] : !llvm.ptr<1> -> vector<2xi32> + // CHECK: rocdl.global.load.tr.b64 %[[GL_PTR]] : !llvm.ptr<1> -> vector<2xi32> + // CHECK: rocdl.global.load.tr6.b96 %[[GL_PTR]] : !llvm.ptr<1> -> vector<3xi32> + // CHECK: rocdl.global.load.tr.b128 %[[GL_PTR]] : !llvm.ptr<1> -> vector<8xi16> + // CHECK: rocdl.global.load.tr.b128 %[[GL_PTR]] : !llvm.ptr<1> -> vector<8xf16> + // CHECK: rocdl.global.load.tr.b128 %[[GL_PTR]] : !llvm.ptr<1> -> vector<8xbf16> + // CHECK: rocdl.ds.load.tr4.b64 %[[DS_OTR]] : !llvm.ptr<3> -> vector<2xi32> + // CHECK: rocdl.ds.load.tr8.b64 %[[DS_OTR]] : !llvm.ptr<3> -> vector<2xi32> + // CHECK: rocdl.ds.load.tr6.b96 %[[DS_OTR]] : !llvm.ptr<3> -> vector<3xi32> + // CHECK: rocdl.ds.load.tr16.b128 %[[DS_OTR]] : !llvm.ptr<3> -> vector<8xi16> + // CHECK: rocdl.ds.load.tr16.b128 %[[DS_OTR]] : !llvm.ptr<3> -> vector<8xf16> 
+ // CHECK: rocdl.ds.load.tr16.b128 %[[DS_OTR]] : !llvm.ptr<3> -> vector<8xbf16> + // CHECK: llvm.return + + rocdl.global.load.tr4.b64 %gl_ptr : !llvm.ptr<1> -> vector<2xi32> + rocdl.global.load.tr.b64 %gl_ptr : !llvm.ptr<1> -> vector<2xi32> + rocdl.global.load.tr6.b96 %gl_ptr : !llvm.ptr<1> -> vector<3xi32> + rocdl.global.load.tr.b128 %gl_ptr : !llvm.ptr<1> -> vector<8xi16> + rocdl.global.load.tr.b128 %gl_ptr : !llvm.ptr<1> -> vector<8xf16> + rocdl.global.load.tr.b128 %gl_ptr : !llvm.ptr<1> -> vector<8xbf16> + + rocdl.ds.load.tr4.b64 %ds_ptr : !llvm.ptr<3> -> vector<2xi32> + rocdl.ds.load.tr8.b64 %ds_ptr : !llvm.ptr<3> -> vector<2xi32> + rocdl.ds.load.tr6.b96 %ds_ptr : !llvm.ptr<3> -> vector<3xi32> + rocdl.ds.load.tr16.b128 %ds_ptr : !llvm.ptr<3> -> vector<8xi16> + rocdl.ds.load.tr16.b128 %ds_ptr : !llvm.ptr<3> -> vector<8xf16> + rocdl.ds.load.tr16.b128 %ds_ptr : !llvm.ptr<3> -> vector<8xbf16> + llvm.return +} + llvm.func @rocdl.load.to.lds(%src : !llvm.ptr<7>, %dst: !llvm.ptr<3>) { // CHECK-LABEL @rocdl.load.to.lds //CHECK: rocdl.load.to.lds %{{.*}}, %{{.*}}, 4, 0, 0 : <7> diff --git a/mlir/test/Dialect/XeGPU/propagate-layout-inst-data.mlir b/mlir/test/Dialect/XeGPU/propagate-layout-inst-data.mlir index 58461b8be52c4..c31ef323a94d2 100644 --- a/mlir/test/Dialect/XeGPU/propagate-layout-inst-data.mlir +++ b/mlir/test/Dialect/XeGPU/propagate-layout-inst-data.mlir @@ -2,17 +2,17 @@ // CHECK-LABEL: func.func @dpas_f16( // CHECK-SAME: %[[ARG0:[0-9a-zA-Z]+]]: memref<8x16xf16>, %[[ARG1:[0-9a-zA-Z]+]]: memref<16x16xf16>, %[[ARG2:[0-9a-zA-Z]+]]: memref<8x16xf32>) { -// CHECK: %[[CST:.*]] = arith.constant {layout_result_0 = #xegpu.layout} dense<0.000000e+00> : vector<8x16xf32> -// CHECK: %[[T0:.*]] = xegpu.create_nd_tdesc %[[ARG0]][{{.*}}] : memref<8x16xf16> -> !xegpu.tensor_desc<8x16xf16, #xegpu.layout> -// CHECK: %[[T1:.*]] = xegpu.create_nd_tdesc %[[ARG1]][{{.*}}] : memref<16x16xf16> -> !xegpu.tensor_desc<16x16xf16, #xegpu.layout> -// CHECK: %[[T2:.*]] = xegpu.load_nd %[[T0]] {layout_result_0 = #xegpu.layout} : -// CHECK-SAME: !xegpu.tensor_desc<8x16xf16, #xegpu.layout> -> vector<8x16xf16> -// CHECK: %[[T3:.*]] = xegpu.load_nd %[[T1]] {layout_result_0 = #xegpu.layout} : -// CHECK-SAME: !xegpu.tensor_desc<16x16xf16, #xegpu.layout> -> vector<16x16xf16> -// CHECK: %[[T4:.*]] = xegpu.dpas %[[T2]], %[[T3]], %[[CST]] {layout_result_0 = #xegpu.layout} : +// CHECK: %[[CST:.*]] = arith.constant {layout_result_0 = #xegpu.layout} dense<0.000000e+00> : vector<8x16xf32> +// CHECK: %[[T0:.*]] = xegpu.create_nd_tdesc %[[ARG0]][{{.*}}] : memref<8x16xf16> -> !xegpu.tensor_desc<8x16xf16, #xegpu.layout +// CHECK: %[[T1:.*]] = xegpu.create_nd_tdesc %[[ARG1]][{{.*}}] : memref<16x16xf16> -> !xegpu.tensor_desc<16x16xf16, #xegpu.layout> +// CHECK: %[[T2:.*]] = xegpu.load_nd %[[T0]] {layout_result_0 = #xegpu.layout} : +// CHECK-SAME: !xegpu.tensor_desc<8x16xf16, #xegpu.layout> -> vector<8x16xf16> +// CHECK: %[[T3:.*]] = xegpu.load_nd %[[T1]] {layout_result_0 = #xegpu.layout} : +// CHECK-SAME: !xegpu.tensor_desc<16x16xf16, #xegpu.layout> -> vector<16x16xf16> +// CHECK: %[[T4:.*]] = xegpu.dpas %[[T2]], %[[T3]], %[[CST]] {layout_result_0 = #xegpu.layout} : // CHECK-SAME: vector<8x16xf16>, vector<16x16xf16>, vector<8x16xf32> -> vector<8x16xf32> -// CHECK: %[[T5:.*]] = xegpu.create_nd_tdesc %[[ARG2]][{{.*}}] : memref<8x16xf32> -> !xegpu.tensor_desc<8x16xf32, #xegpu.layout> -// CHECK: xegpu.store_nd %[[T4]], %[[T5]] : vector<8x16xf32>, !xegpu.tensor_desc<8x16xf32, #xegpu.layout> +// CHECK: %[[T5:.*]] = xegpu.create_nd_tdesc 
%[[ARG2]][{{.*}}] : memref<8x16xf32> -> !xegpu.tensor_desc<8x16xf32, #xegpu.layout +// CHECK: xegpu.store_nd %[[T4]], %[[T5]] : vector<8x16xf32>, !xegpu.tensor_desc<8x16xf32, #xegpu.layout> gpu.module @test { func.func @dpas_f16(%arg0: memref<8x16xf16>, %arg1: memref<16x16xf16>, %arg2: memref<8x16xf32>) { @@ -46,18 +46,18 @@ gpu.module @test_kernel { %out:3 = scf.for %k = %c0 to %c1024 step %c32 iter_args(%arg0 = %a_tdesc, %arg1 = %b_tdesc, %arg2 = %c_tdesc) -> (!xegpu.tensor_desc<16x32xf16>, !xegpu.tensor_desc<16x32xf16>, !xegpu.tensor_desc<16x32xf16>) { - //CHECK: xegpu.load_nd {{.*}} {layout_result_0 = #xegpu.layout} : - //CHECK-SAME: !xegpu.tensor_desc<16x32xf16, #xegpu.layout> -> vector<16x32xf16> + //CHECK: xegpu.load_nd {{.*}} {layout_result_0 = #xegpu.layout} : + //CHECK-SAME: !xegpu.tensor_desc<16x32xf16, #xegpu.layout> -> vector<16x32xf16> %a = xegpu.load_nd %arg0 : !xegpu.tensor_desc<16x32xf16> -> vector<16x32xf16> %b = xegpu.load_nd %arg1 : !xegpu.tensor_desc<16x32xf16> -> vector<16x32xf16> - //CHECK-COUNT: arith.addf {{.*}} {layout_result_0 = #xegpu.layout} : vector<16x32xf16> + //CHECK-COUNT: arith.addf {{.*}} {layout_result_0 = #xegpu.layout} : vector<16x32xf16> %c = arith.addf %a, %b : vector<16x32xf16> - //CHECK-COUNT: xegpu.store_nd {{.*}} : vector<16x32xf16>, !xegpu.tensor_desc<16x32xf16, #xegpu.layout>> + //CHECK-COUNT: xegpu.store_nd {{.*}} : vector<16x32xf16>, !xegpu.tensor_desc<16x32xf16, #xegpu.layout> xegpu.store_nd %c, %arg2: vector<16x32xf16>, !xegpu.tensor_desc<16x32xf16> - //CHECK-COUNT: xegpu.update_nd_offset {{.*}} : !xegpu.tensor_desc<16x32xf16, #xegpu.layout> + //CHECK-COUNT: xegpu.update_nd_offset {{.*}} : !xegpu.tensor_desc<16x32xf16, #xegpu.layout> %a_next_tdesc = xegpu.update_nd_offset %arg0, [%c0, %c32] : !xegpu.tensor_desc<16x32xf16> %b_next_tdesc = xegpu.update_nd_offset %arg1, [%c0, %c32] : !xegpu.tensor_desc<16x32xf16> %c_next_tdesc = xegpu.update_nd_offset %arg2, [%c0, %c32] : !xegpu.tensor_desc<16x32xf16> @@ -85,18 +85,18 @@ gpu.module @test_kernel { %out:3 = scf.for %k = %c0 to %c1024 step %c32 iter_args(%arg0 = %a_tdesc, %arg1 = %b_tdesc, %arg2 = %c_tdesc) -> (!xegpu.tensor_desc<12x32xf16>, !xegpu.tensor_desc<12x32xf16>, !xegpu.tensor_desc<12x32xf16>) { - //CHECK: xegpu.load_nd {{.*}} {layout_result_0 = #xegpu.layout} : - //CHECK-SAME: !xegpu.tensor_desc<12x32xf16, #xegpu.layout> -> vector<12x32xf16> + //CHECK: xegpu.load_nd {{.*}} {layout_result_0 = #xegpu.layout} : + //CHECK-SAME: !xegpu.tensor_desc<12x32xf16, #xegpu.layout> -> vector<12x32xf16> %a = xegpu.load_nd %arg0 : !xegpu.tensor_desc<12x32xf16> -> vector<12x32xf16> %b = xegpu.load_nd %arg1 : !xegpu.tensor_desc<12x32xf16> -> vector<12x32xf16> - //CHECK-COUNT: arith.addf {{.*}} {layout_result_0 = #xegpu.layout} : vector<12x32xf16> + //CHECK-COUNT: arith.addf {{.*}} {layout_result_0 = #xegpu.layout} : vector<12x32xf16> %c = arith.addf %a, %b : vector<12x32xf16> - //CHECK-COUNT: xegpu.store_nd {{.*}} : vector<12x32xf16>, !xegpu.tensor_desc<12x32xf16, #xegpu.layout>> + //CHECK-COUNT: xegpu.store_nd {{.*}} : vector<12x32xf16>, !xegpu.tensor_desc<12x32xf16, #xegpu.layout> xegpu.store_nd %c, %arg2: vector<12x32xf16>, !xegpu.tensor_desc<12x32xf16> - //CHECK-COUNT: xegpu.update_nd_offset {{.*}} : !xegpu.tensor_desc<12x32xf16, #xegpu.layout> + //CHECK-COUNT: xegpu.update_nd_offset {{.*}} : !xegpu.tensor_desc<12x32xf16, #xegpu.layout> %a_next_tdesc = xegpu.update_nd_offset %arg0, [%c0, %c32] : !xegpu.tensor_desc<12x32xf16> %b_next_tdesc = xegpu.update_nd_offset %arg1, [%c0, %c32] : 
!xegpu.tensor_desc<12x32xf16> %c_next_tdesc = xegpu.update_nd_offset %arg2, [%c0, %c32] : !xegpu.tensor_desc<12x32xf16> @@ -114,7 +114,7 @@ gpu.module @test { // CHECK: %{{.*}} = arith.constant {layout_result_0 = #xegpu.layout} dense : vector<16xi1> // CHECK: %{{.*}} = arith.constant {layout_result_0 = #xegpu.layout} dense<12> : vector<16xindex> // CHECK: %{{.*}} = xegpu.load %[[ARG0]][%{{.*}}], %{{.*}} <{chunk_size = 8 : i64}> -// CHECK-SAME: {layout_result_0 = #xegpu.layout} : memref<256xf16>, vector<16xindex>, vector<16xi1> -> vector<16x8xf16> +// CHECK-SAME: {layout_result_0 = #xegpu.layout} : memref<256xf16>, vector<16xindex>, vector<16xi1> -> vector<16x8xf16> // CHECK: xegpu.store %0, %[[ARG0]][%{{.*}}], %{{.*}} <{chunk_size = 8 : i64}> : vector<16x8xf16>, memref<256xf16>, vector<16xindex>, vector<16xi1> func.func @scatter_ops_chunksize(%src: memref<256xf16>) { %1 = arith.constant dense<1>: vector<16xi1> diff --git a/mlir/test/Dialect/XeGPU/propagate-layout.mlir b/mlir/test/Dialect/XeGPU/propagate-layout.mlir index 61e315d0d2080..eb004932af4be 100644 --- a/mlir/test/Dialect/XeGPU/propagate-layout.mlir +++ b/mlir/test/Dialect/XeGPU/propagate-layout.mlir @@ -1,4 +1,4 @@ -// RUN: mlir-opt -xevm-attach-target='chip=pvc' -xegpu-propagate-layout -split-input-file %s | FileCheck %s +// RUN: mlir-opt -xevm-attach-target='chip=pvc' -xegpu-propagate-layout="layout-kind=lane" -split-input-file %s | FileCheck %s gpu.module @test { // CHECK-LABEL: func.func @dpas_f16( diff --git a/mlir/test/Dialect/XeGPU/transform-ops-invalid.mlir b/mlir/test/Dialect/XeGPU/transform-ops-invalid.mlir index 303584518f9f4..726b6748452ae 100644 --- a/mlir/test/Dialect/XeGPU/transform-ops-invalid.mlir +++ b/mlir/test/Dialect/XeGPU/transform-ops-invalid.mlir @@ -13,3 +13,61 @@ module attributes {transform.with_named_sequence} { transform.yield } } + +// ----- + +// CHECK-LABEL: @set_op_layout_attr_bad_result_index +func.func @set_op_layout_attr_bad_result_index(%arg0: memref<4096x4096xf16>) { + %0 = xegpu.create_nd_tdesc %arg0 : memref<4096x4096xf16> -> !xegpu.tensor_desc<256x32xf16> + %1 = xegpu.load_nd %0[0, 0] : !xegpu.tensor_desc<256x32xf16> -> vector<256x32xf16> + %2 = arith.extf %1 : vector<256x32xf16> to vector<256x32xf32> + return +} + +module attributes {transform.with_named_sequence} { + transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) { + %0 = transform.structured.match ops{["arith.extf"]} in %arg1 : (!transform.any_op) -> !transform.any_op + // expected-error@below {{Index exceeds the number of op results}} + transform.xegpu.set_op_layout_attr %0 result index = 1 sg_layout = [8, 4] sg_data = [32, 64] : !transform.any_op + transform.yield + } +} + +// ----- + +// CHECK-LABEL: @set_op_layout_attr_bad_operand_index +func.func @set_op_layout_attr_bad_operand_index(%arg0: memref<4096x4096xf16>) { + %0 = xegpu.create_nd_tdesc %arg0 : memref<4096x4096xf16> -> !xegpu.tensor_desc<256x32xf16> + %1 = xegpu.load_nd %0[0, 0] : !xegpu.tensor_desc<256x32xf16> -> vector<256x32xf16> + %2 = arith.extf %1 : vector<256x32xf16> to vector<256x32xf32> + return +} + +module attributes {transform.with_named_sequence} { + transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) { + %0 = transform.structured.match ops{["arith.extf"]} in %arg1 : (!transform.any_op) -> !transform.any_op + // expected-error@below {{Index exceeds the number of op operands}} + transform.xegpu.set_op_layout_attr %0 index = 1 sg_layout = [8, 4] sg_data = [32, 64] : !transform.any_op + 
+    transform.yield
+  }
+}
+
+// -----
+
+// CHECK-LABEL: @set_op_layout_attr_multiple
+func.func @set_op_layout_attr_multiple(%arg0: memref<4096x4096xf16>) {
+  %0 = xegpu.create_nd_tdesc %arg0 : memref<4096x4096xf16> -> !xegpu.tensor_desc<256x32xf16>
+  %1 = xegpu.load_nd %0[0, 0] : !xegpu.tensor_desc<256x32xf16> -> vector<256x32xf16>
+  %2 = arith.extf %1 : vector<256x32xf16> to vector<256x32xf32>
+  %3 = arith.extf %2 : vector<256x32xf32> to vector<256x32xf64>
+  return
+}
+
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["arith.extf"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    // expected-error@below {{Requires exactly one targetOp handle (got 2)}}
+    transform.xegpu.set_op_layout_attr %0 sg_layout = [8, 4] sg_data = [32, 64] : !transform.any_op
+    transform.yield
+  }
+}
diff --git a/mlir/test/Dialect/XeGPU/transform-ops.mlir b/mlir/test/Dialect/XeGPU/transform-ops.mlir
index 23e1cd946b4cd..bd6a79244ed30 100644
--- a/mlir/test/Dialect/XeGPU/transform-ops.mlir
+++ b/mlir/test/Dialect/XeGPU/transform-ops.mlir
@@ -1,5 +1,67 @@
 // RUN: mlir-opt %s -transform-interpreter -split-input-file -verify-diagnostics | FileCheck %s
 
+// CHECK-LABEL: @get_desc_op_a
+func.func @get_desc_op_a(%arg0: memref<4096x4096xf16>, %arg1: memref<4096x4096xf16>, %arg2: memref<4096x4096xf16>) {
+  %c32 = arith.constant 32 : index
+  %c4096 = arith.constant 4096 : index
+  %c0 = arith.constant 0 : index
+  %0 = xegpu.create_nd_tdesc %arg2 : memref<4096x4096xf16> -> !xegpu.tensor_desc<256x256xf16>
+  %1 = xegpu.load_nd %0[%c0, %c0] : !xegpu.tensor_desc<256x256xf16> -> vector<256x256xf16>
+  // expected-remark @below {{found desc op}}
+  %3 = xegpu.create_nd_tdesc %arg0 : memref<4096x4096xf16> -> !xegpu.tensor_desc<256x32xf16>
+  %4 = xegpu.create_nd_tdesc %arg1 : memref<4096x4096xf16> -> !xegpu.tensor_desc<32x256xf16>
+  %2 = scf.for %arg3 = %c0 to %c4096 step %c32 iter_args(%arg4 = %1) -> (vector<256x256xf16>) {
+    %5 = xegpu.load_nd %3[%c0, %arg3] : !xegpu.tensor_desc<256x32xf16> -> vector<256x32xf16>
+    %6 = xegpu.load_nd %4[%arg3, %c0] : !xegpu.tensor_desc<32x256xf16> -> vector<32x256xf16>
+    %7 = xegpu.dpas %5, %6, %arg4 : vector<256x32xf16>, vector<32x256xf16>, vector<256x256xf16> -> vector<256x256xf16>
+    scf.yield %7 : vector<256x256xf16>
+  }
+  return
+}
+
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["xegpu.dpas"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %1 = transform.get_operand %0[0] : (!transform.any_op) -> !transform.any_value
+    %2 = transform.xegpu.get_desc_op %1 : (!transform.any_value) -> !transform.any_op
+    transform.debug.emit_remark_at %2, "found desc op" : !transform.any_op
+    transform.yield
+  }
+}
+
+// -----
+
+// CHECK-LABEL: @get_desc_op_c
+func.func @get_desc_op_c(%arg0: memref<4096x4096xf16>, %arg1: memref<4096x4096xf16>, %arg2: memref<4096x4096xf16>) {
+  %c32 = arith.constant 32 : index
+  %c4096 = arith.constant 4096 : index
+  %c0 = arith.constant 0 : index
+  // expected-remark @below {{found desc op}}
+  %0 = xegpu.create_nd_tdesc %arg2 : memref<4096x4096xf16> -> !xegpu.tensor_desc<256x256xf16>
+  %1 = xegpu.load_nd %0[%c0, %c0] : !xegpu.tensor_desc<256x256xf16> -> vector<256x256xf16>
+  %3 = xegpu.create_nd_tdesc %arg0 : memref<4096x4096xf16> -> !xegpu.tensor_desc<256x32xf16>
+  %4 = xegpu.create_nd_tdesc %arg1 : memref<4096x4096xf16> -> !xegpu.tensor_desc<32x256xf16>
+  %2 = scf.for %arg3 = %c0 to %c4096 step %c32 iter_args(%arg4 = %1) -> (vector<256x256xf16>) {
+    %5 = xegpu.load_nd %3[%c0, %arg3] : !xegpu.tensor_desc<256x32xf16> -> vector<256x32xf16>
+    %6 = xegpu.load_nd %4[%arg3, %c0] : !xegpu.tensor_desc<32x256xf16> -> vector<32x256xf16>
+    %7 = xegpu.dpas %5, %6, %arg4 : vector<256x32xf16>, vector<32x256xf16>, vector<256x256xf16> -> vector<256x256xf16>
+    scf.yield %7 : vector<256x256xf16>
+  }
+  return
+}
+
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["xegpu.dpas"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %1 = transform.get_operand %0[2] : (!transform.any_op) -> !transform.any_value
+    %2 = transform.xegpu.get_desc_op %1 : (!transform.any_value) -> !transform.any_op
+    transform.debug.emit_remark_at %2, "found desc op" : !transform.any_op
+    transform.yield
+  }
+}
+
+// -----
+
 // CHECK-LABEL: @set_desc_layout
 func.func @set_desc_layout(%arg0: memref<4096x4096xf16>) {
   // CHECK: %[[V0:.+]] = xegpu.create_nd_tdesc %arg0
@@ -56,3 +118,137 @@ module attributes {transform.with_named_sequence} {
     transform.yield
   }
 }
+
+// -----
+
+// CHECK-LABEL: @set_op_layout_attr_result_default_index
+func.func @set_op_layout_attr_result_default_index(%arg0: memref<4096x4096xf16>, %arg1: memref<4096x4096xf16>, %arg2: memref<4096x4096xf16>) {
+  %0 = xegpu.create_nd_tdesc %arg0 : memref<4096x4096xf16> -> !xegpu.tensor_desc<256x32xf16>
+  %1 = xegpu.load_nd %0[0, 0] : !xegpu.tensor_desc<256x32xf16> -> vector<256x32xf16>
+  %2 = xegpu.create_nd_tdesc %arg1 : memref<4096x4096xf16> -> !xegpu.tensor_desc<32x256xf16>
+  %3 = xegpu.load_nd %2[0, 0] : !xegpu.tensor_desc<32x256xf16> -> vector<32x256xf16>
+  %4 = xegpu.create_nd_tdesc %arg2 : memref<4096x4096xf16> -> !xegpu.tensor_desc<256x256xf16>
+  %5 = xegpu.load_nd %4[0, 0] : !xegpu.tensor_desc<256x256xf16> -> vector<256x256xf16>
+  // CHECK: = xegpu.dpas
+  // CHECK-SAME: {layout_result_0 = #xegpu.layout<sg_layout = [8, 4], sg_data = [32, 64], inst_data = [8, 16]>}
+  %6 = xegpu.dpas %1, %3, %5 : vector<256x32xf16>, vector<32x256xf16>, vector<256x256xf16> -> vector<256x256xf16>
+  return
+}
+
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["xegpu.dpas"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    // CHECK: transform.xegpu.set_op_layout_attr %{{.*}}
+    transform.xegpu.set_op_layout_attr %0 result sg_layout = [8, 4] sg_data = [32, 64] inst_data = [8, 16] : !transform.any_op
+    transform.yield
+  }
+}
+
+// -----
+
+// CHECK-LABEL: @set_op_layout_attr_result_sg_param
+func.func @set_op_layout_attr_result_sg_param(%arg0: memref<4096x4096xf16>) {
+  %0 = xegpu.create_nd_tdesc %arg0 : memref<4096x4096xf16> -> !xegpu.tensor_desc<256x32xf16>
+  %1 = xegpu.load_nd %0[0, 0] : !xegpu.tensor_desc<256x32xf16> -> vector<256x32xf16>
+  // CHECK: = arith.extf %1
+  // CHECK-SAME: {layout_result_0 = #xegpu.layout<sg_layout = [8, 4], sg_data = [32, 64], inst_data = [8, 16]>}
+  %2 = arith.extf %1 : vector<256x32xf16> to vector<256x32xf32>
+  return
+}
+
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["arith.extf"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    // CHECK: transform.xegpu.set_op_layout_attr %{{.*}}
+    %layout0 = transform.param.constant 8 : i64 -> !transform.param<i64>
+    transform.xegpu.set_op_layout_attr %0 result sg_layout = [%layout0, 4] sg_data = [32, 64] inst_data = [8, 16] : !transform.any_op, !transform.param<i64>
+    transform.yield
+  }
+}
+
+// -----
+
+// CHECK-LABEL: @set_op_layout_attr_result_sg_param2
+func.func @set_op_layout_attr_result_sg_param2(%arg0: memref<4096x4096xf16>) {
+  %0 = xegpu.create_nd_tdesc %arg0 : memref<4096x4096xf16> -> !xegpu.tensor_desc<256x32xf16>
+  %1 = xegpu.load_nd %0[0, 0] : !xegpu.tensor_desc<256x32xf16> -> vector<256x32xf16>
+  // CHECK: = arith.extf %1
+  // CHECK-SAME: {layout_result_0 = #xegpu.layout<sg_layout = [8, 4], sg_data = [32, 64], inst_data = [8, 16]>}
+  %2 = arith.extf %1 : vector<256x32xf16> to vector<256x32xf32>
+  return
+}
+
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["arith.extf"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    // CHECK: transform.xegpu.set_op_layout_attr %{{.*}}
+    %layout0 = transform.param.constant 8 : i64 -> !transform.param<i64>
+    %layout1 = transform.param.constant 4 : i64 -> !transform.param<i64>
+    transform.xegpu.set_op_layout_attr %0 result sg_layout = [%layout0, %layout1] sg_data = [32, 64] inst_data = [8, 16] : !transform.any_op, !transform.param<i64>, !transform.param<i64>
+    transform.yield
+  }
+}
+
+// -----
+
+// CHECK-LABEL: @set_op_layout_attr_result0
+func.func @set_op_layout_attr_result0(%arg0: memref<4096x4096xf16>) {
+  %0 = xegpu.create_nd_tdesc %arg0 : memref<4096x4096xf16> -> !xegpu.tensor_desc<256x32xf16>
+  %1 = xegpu.load_nd %0[0, 0] : !xegpu.tensor_desc<256x32xf16> -> vector<256x32xf16>
+  // CHECK: = arith.extf %1
+  // CHECK-SAME: {layout_result_0 = #xegpu.layout<sg_layout = [8, 4], sg_data = [32, 64], inst_data = [8, 16]>}
+  %2 = arith.extf %1 : vector<256x32xf16> to vector<256x32xf32>
+  return
+}
+
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["arith.extf"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    // CHECK: transform.xegpu.set_op_layout_attr %{{.*}}
+    transform.xegpu.set_op_layout_attr %0 result index = 0 sg_layout = [8, 4] sg_data = [32, 64] inst_data = [8, 16] : !transform.any_op
+    transform.yield
+  }
+}
+
+// -----
+
+// CHECK-LABEL: @set_op_layout_attr_operand_minimal
+func.func @set_op_layout_attr_operand_minimal(%arg0: memref<4096x4096xf16>) {
+  %0 = xegpu.create_nd_tdesc %arg0 : memref<4096x4096xf16> -> !xegpu.tensor_desc<256x32xf16>
+  %1 = xegpu.load_nd %0[0, 0] : !xegpu.tensor_desc<256x32xf16> -> vector<256x32xf16>
+  // CHECK: = arith.extf %1
+  // CHECK-SAME: {layout_operand_0 = #xegpu.layout<sg_layout = [8, 4], sg_data = [32, 64]>}
+  %2 = arith.extf %1 : vector<256x32xf16> to vector<256x32xf32>
+  return
+}
+
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["arith.extf"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    // CHECK: transform.xegpu.set_op_layout_attr %{{.*}}
+    transform.xegpu.set_op_layout_attr %0 sg_layout = [8, 4] sg_data = [32, 64] : !transform.any_op
+    transform.yield
+  }
+}
+
+// -----
+
+// CHECK-LABEL: @set_op_layout_attr_operand1
+func.func @set_op_layout_attr_operand1(%arg0: memref<4096x4096xf16>, %arg1: memref<4096x4096xf16>) {
+  %0 = xegpu.create_nd_tdesc %arg0 : memref<4096x4096xf16> -> !xegpu.tensor_desc<256x32xf16>
+  %1 = xegpu.load_nd %0[0, 0] : !xegpu.tensor_desc<256x32xf16> -> vector<256x32xf16>
+  %2 = xegpu.create_nd_tdesc %arg1 : memref<4096x4096xf16> -> !xegpu.tensor_desc<256x32xf16>
+  %3 = xegpu.load_nd %2[0, 0] : !xegpu.tensor_desc<256x32xf16> -> vector<256x32xf16>
+  // CHECK: = arith.addf %1, %3
+  // CHECK-SAME: {layout_operand_1 = #xegpu.layout<sg_layout = [8, 4], sg_data = [32, 64], inst_data = [8, 16]>}
+  %6 = arith.addf %1, %3 : vector<256x32xf16>
+  return
+}
+
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["arith.addf"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    // CHECK: transform.xegpu.set_op_layout_attr %{{.*}}
+    transform.xegpu.set_op_layout_attr %0 index = 1 sg_layout = [8, 4] sg_data = [32, 64] inst_data = [8, 16] : !transform.any_op
+    transform.yield
+  }
+}
diff --git a/mlir/test/Interfaces/InferShapedTypeOpInterface/resolve-shaped-type-result-dims.mlir b/mlir/test/Interfaces/InferShapedTypeOpInterface/resolve-shaped-type-result-dims.mlir
index 4fa7406f21042..624e0990a4bb3 100644
--- a/mlir/test/Interfaces/InferShapedTypeOpInterface/resolve-shaped-type-result-dims.mlir
+++ b/mlir/test/Interfaces/InferShapedTypeOpInterface/resolve-shaped-type-result-dims.mlir
@@ -1,4 +1,5 @@
-// RUN: mlir-opt %s -resolve-shaped-type-result-dims -split-input-file | FileCheck %s
+// RUN: mlir-opt %s --pass-pipeline="builtin.module(func.func(resolve-shaped-type-result-dims{error-on-pattern-iteration-limit=false}))" -split-input-file | FileCheck %s
+// See @test_unreifiable_result_shapes below for why `error-on-pattern-iteration-limit` is set to false.
 
 func.func @result_shape(%arg0 : tensor<2x3x?xf32>, %arg1 : tensor<?x5xf32>)
     -> (index, index, index, index, index) {
@@ -27,12 +28,14 @@
 
 // -----
 
-func.func @result_shape_per_dim(%arg0 : tensor<2x3x?xf32>, %arg1 : tensor<?x5xf32>)
+// Test result shape reification for an operation that implements only the
+// `reifyResultShapes` method of the `ReifyRankedShapedTypeOpInterface`.
+func.func @reify_shaped_type_using_reify_result_shapes(%arg0 : tensor<2x3x?xf32>, %arg1 : tensor<?x5xf32>)
     -> (index, index, index, index, index) {
   %c0 = arith.constant 0 : index
   %c1 = arith.constant 1 : index
   %c2 = arith.constant 2 : index
-  %0:2 = "test.op_with_result_shape_per_dim_interface"(%arg0, %arg1)
+  %0:2 = "test.reify_shaped_type_using_reify_result_shapes"(%arg0, %arg1)
       : (tensor<2x3x?xf32>, tensor<?x5xf32>) -> (tensor<?x5xf32>, tensor<2x3x?xf32>)
   %1 = tensor.dim %0#0, %c0 : tensor<?x5xf32>
   %2 = tensor.dim %0#0, %c1 : tensor<?x5xf32>
@@ -41,7 +44,7 @@ func.func @result_shape_per_dim(%arg0 : tensor<2x3x?xf32>, %arg1 : tensor<?x5xf32>)
   return %1, %2, %3, %4, %5 : index, index, index, index, index
 }
 
-// CHECK-LABEL: func @result_shape_per_dim(
+// CHECK-LABEL: func @reify_shaped_type_using_reify_result_shapes(
 // CHECK-SAME:   %[[ARG_0:[a-z0-9]*]]: tensor<2x3x?xf32>
 // CHECK-SAME:   %[[ARG_1:[a-z0-9]*]]: tensor<?x5xf32>)
 // CHECK-DAG:    %[[C0:.+]] = arith.constant 0 : index
@@ -51,3 +54,127 @@ func.func @result_shape_per_dim(%arg0 : tensor<2x3x?xf32>, %arg1 : tensor<?x5xf32>)
+
+// -----
+
+// Test result shape reification for an operation that implements only the
+// `reifyShapeOfResult` method of the `ReifyRankedShapedTypeOpInterface`.
+func.func @reify_shaped_type_using_reify_shape_of_result(%arg0 : tensor<2x3x?xf32>, %arg1 : tensor<?x5xf32>)
+    -> (index, index, index, index, index) {
+  %c0 = arith.constant 0 : index
+  %c1 = arith.constant 1 : index
+  %c2 = arith.constant 2 : index
+  %0:2 = "test.reify_shaped_type_using_reify_shape_of_result"(%arg0, %arg1)
+      : (tensor<2x3x?xf32>, tensor<?x5xf32>) -> (tensor<?x5xf32>, tensor<2x3x?xf32>)
+  %1 = tensor.dim %0#0, %c0 : tensor<?x5xf32>
+  %2 = tensor.dim %0#0, %c1 : tensor<?x5xf32>
+  %3 = tensor.dim %0#1, %c0 : tensor<2x3x?xf32>
+  %4 = tensor.dim %0#1, %c1 : tensor<2x3x?xf32>
+  %5 = tensor.dim %0#1, %c2 : tensor<2x3x?xf32>
+  return %1, %2, %3, %4, %5 : index, index, index, index, index
+}
+// CHECK-LABEL: func @reify_shaped_type_using_reify_shape_of_result(
+// CHECK-SAME:   %[[ARG_0:[a-z0-9]*]]: tensor<2x3x?xf32>
+// CHECK-SAME:   %[[ARG_1:[a-z0-9]*]]: tensor<?x5xf32>)
+// CHECK-DAG:    %[[C0:.+]] = arith.constant 0 : index
+// CHECK-DAG:    %[[C2:.+]] = arith.constant 2 : index
+// CHECK-DAG:    %[[C3:.+]] = arith.constant 3 : index
+// CHECK-DAG:    %[[C5:.+]] = arith.constant 5 : index
+// CHECK-DAG:    %[[D0:.+]] = tensor.dim %[[ARG_1]], %[[C0]]
+// CHECK-DAG:    %[[D1:.+]] = tensor.dim %[[ARG_0]], %[[C2]]
+// CHECK:        return %[[D0]], %[[C5]], %[[C2]], %[[C3]], %[[D1]]
+
+// -----
+
+// Test result shape reification for an operation that implements only the
+// `reifyDimOfResult` method of the `ReifyRankedShapedTypeOpInterface`.
+func.func @reify_shaped_type_using_reify_dim_of_result(%arg0 : tensor<2x3x?xf32>, %arg1 : tensor<?x5xf32>)
+    -> (index, index, index, index, index) {
+  %c0 = arith.constant 0 : index
+  %c1 = arith.constant 1 : index
+  %c2 = arith.constant 2 : index
+  %0:2 = "test.reify_shaped_type_using_reify_dim_of_result"(%arg0, %arg1)
+      : (tensor<2x3x?xf32>, tensor<?x5xf32>) -> (tensor<?x5xf32>, tensor<2x3x?xf32>)
+  %1 = tensor.dim %0#0, %c0 : tensor<?x5xf32>
+  %2 = tensor.dim %0#0, %c1 : tensor<?x5xf32>
+  %3 = tensor.dim %0#1, %c0 : tensor<2x3x?xf32>
+  %4 = tensor.dim %0#1, %c1 : tensor<2x3x?xf32>
+  %5 = tensor.dim %0#1, %c2 : tensor<2x3x?xf32>
+  return %1, %2, %3, %4, %5 : index, index, index, index, index
+}
+// CHECK-LABEL: func @reify_shaped_type_using_reify_dim_of_result(
+// CHECK-SAME:   %[[ARG_0:[a-z0-9]*]]: tensor<2x3x?xf32>
+// CHECK-SAME:   %[[ARG_1:[a-z0-9]*]]: tensor<?x5xf32>)
+// CHECK-DAG:    %[[C0:.+]] = arith.constant 0 : index
+// CHECK-DAG:    %[[C2:.+]] = arith.constant 2 : index
+// CHECK-DAG:    %[[C3:.+]] = arith.constant 3 : index
+// CHECK-DAG:    %[[C5:.+]] = arith.constant 5 : index
+// CHECK-DAG:    %[[D0:.+]] = tensor.dim %[[ARG_1]], %[[C0]]
+// CHECK-DAG:    %[[D1:.+]] = tensor.dim %[[ARG_0]], %[[C2]]
+// CHECK:        return %[[D0]], %[[C5]], %[[C2]], %[[C3]], %[[D1]]
+
+// -----
+
+// This test also indicates a problem with the approach of just using
+// `reifyResultShapes` without being specific about the {result, dim} pair
+// that needs to be resolved. The `reifyResultShapes` implementation
+// introduces `dim` operations that are effectively dead, and it creates an
+// infinite loop on pattern application (which eventually bails on hitting
+// the iteration limit). This is the pitfall of this legacy mechanism.
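+//
+// A sketch of the failure mode (an editorial illustration of the comment
+// above, not something FileCheck verifies): each driver iteration asks
+// `reifyResultShapes` for the whole shape, the reification materializes a
+// fresh `tensor.dim` for the dimension it cannot compute, and the resolution
+// pattern then matches that new `tensor.dim` again, so no fixed point is
+// reached.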
+func.func @test_unreifiable_result_shapes(%arg0 : tensor<?x?xf32>)
+    -> (index, index) {
+  %c0 = arith.constant 0 : index
+  %c1 = arith.constant 1 : index
+  %0 = "test.unreifiable_result_shapes"(%arg0) : (tensor<?x?xf32>) -> tensor<?x?xf32>
+  %d0 = tensor.dim %0, %c0 : tensor<?x?xf32>
+  %d1 = tensor.dim %0, %c1 : tensor<?x?xf32>
+  return %d0, %d1 : index, index
+}
+// CHECK-LABEL: func @test_unreifiable_result_shapes(
+// CHECK-SAME:   %[[ARG0:.+]]: tensor<?x?xf32>)
+// CHECK-DAG:    %[[C0:.+]] = arith.constant 0 : index
+// CHECK-DAG:    %[[C1:.+]] = arith.constant 1 : index
+// CHECK-DAG:    %[[D0:.+]] = tensor.dim %[[ARG0]], %[[C0]]
+// CHECK-DAG:    %[[OP:.+]] = "test.unreifiable_result_shapes"(%[[ARG0]])
+// CHECK:        %[[D1:.+]] = tensor.dim %[[OP]], %[[C1]]
+// CHECK:        return %[[D0]], %[[D1]]
+
+// -----
+
+func.func @test_unreifiable_result_shape(%arg0 : tensor<?x?xf32>)
+    -> (index, index) {
+  %c0 = arith.constant 0 : index
+  %c1 = arith.constant 1 : index
+  %0 = "test.unreifiable_result_shape"(%arg0) : (tensor<?x?xf32>) -> tensor<?x?xf32>
+  %d0 = tensor.dim %0, %c0 : tensor<?x?xf32>
+  %d1 = tensor.dim %0, %c1 : tensor<?x?xf32>
+  return %d0, %d1 : index, index
+}
+// CHECK-LABEL: func @test_unreifiable_result_shape(
+// CHECK-SAME:   %[[ARG0:.+]]: tensor<?x?xf32>)
+// CHECK-DAG:    %[[C0:.+]] = arith.constant 0 : index
+// CHECK-DAG:    %[[C1:.+]] = arith.constant 1 : index
+// CHECK-DAG:    %[[D0:.+]] = tensor.dim %[[ARG0]], %[[C0]]
+// CHECK-DAG:    %[[OP:.+]] = "test.unreifiable_result_shape"(%[[ARG0]])
+// CHECK:        %[[D1:.+]] = tensor.dim %[[OP]], %[[C1]]
+// CHECK:        return %[[D0]], %[[D1]]
+
+// -----
+
+func.func @test_unreifiable_dim_of_result_shape(%arg0 : tensor<?x?xf32>)
+    -> (index, index) {
+  %c0 = arith.constant 0 : index
+  %c1 = arith.constant 1 : index
+  %0 = "test.unreifiable_dim_of_result_shape"(%arg0) : (tensor<?x?xf32>) -> tensor<?x?xf32>
+  %d0 = tensor.dim %0, %c0 : tensor<?x?xf32>
+  %d1 = tensor.dim %0, %c1 : tensor<?x?xf32>
+  return %d0, %d1 : index, index
+}
+// CHECK-LABEL: func @test_unreifiable_dim_of_result_shape(
+// CHECK-SAME:   %[[ARG0:.+]]: tensor<?x?xf32>)
+// CHECK-DAG:    %[[C0:.+]] = arith.constant 0 : index
+// CHECK-DAG:    %[[C1:.+]] = arith.constant 1 : index
+// CHECK-DAG:    %[[D0:.+]] = tensor.dim %[[ARG0]], %[[C0]]
+// CHECK-DAG:    %[[OP:.+]] = "test.unreifiable_dim_of_result_shape"(%[[ARG0]])
+// CHECK:        %[[D1:.+]] = tensor.dim %[[OP]], %[[C1]]
+// CHECK:        return %[[D0]], %[[D1]]
diff --git a/mlir/test/Target/LLVMIR/rocdl.mlir b/mlir/test/Target/LLVMIR/rocdl.mlir
index 3fbd9e0567948..db02918d7186c 100644
--- a/mlir/test/Target/LLVMIR/rocdl.mlir
+++ b/mlir/test/Target/LLVMIR/rocdl.mlir
@@ -1028,6 +1028,39 @@ llvm.func @rocdl.ds.read.tr(%ptr : !llvm.ptr<3>) -> vector<4xf16> {
   llvm.return %r3 : vector<4xf16>
 }
 
+llvm.func @rocdl.load.tr.ops(%gl_ptr : !llvm.ptr<1>, %ds_ptr : !llvm.ptr<3>) {
+  // CHECK-LABEL: rocdl.load.tr.ops
+  // CHECK-SAME: (ptr addrspace(1) %[[GL_PTR:.+]], ptr addrspace(3) %[[DS_PTR:.+]])
+  // CHECK: call <2 x i32> @llvm.amdgcn.global.load.tr4.b64.v2i32(ptr addrspace(1) %[[GL_PTR]])
+  // CHECK: call <2 x i32> @llvm.amdgcn.global.load.tr.b64.v2i32(ptr addrspace(1) %[[GL_PTR]])
+  // CHECK: call <3 x i32> @llvm.amdgcn.global.load.tr6.b96.v3i32(ptr addrspace(1) %[[GL_PTR]])
+  // CHECK: call <8 x i16> @llvm.amdgcn.global.load.tr.b128.v8i16(ptr addrspace(1) %[[GL_PTR]])
+  // CHECK: call <8 x half> @llvm.amdgcn.global.load.tr.b128.v8f16(ptr addrspace(1) %[[GL_PTR]])
+  // CHECK: call <8 x bfloat> @llvm.amdgcn.global.load.tr.b128.v8bf16(ptr addrspace(1) %[[GL_PTR]])
+
+  // CHECK: call <2 x i32> @llvm.amdgcn.ds.load.tr4.b64.v2i32(ptr addrspace(3) %[[DS_PTR]])
+  // CHECK: call <2 x i32> @llvm.amdgcn.ds.load.tr8.b64.v2i32(ptr addrspace(3) %[[DS_PTR]])
+  // CHECK: call <3 x i32> @llvm.amdgcn.ds.load.tr6.b96.v3i32(ptr addrspace(3) %[[DS_PTR]])
+  // CHECK: call <8 x i16> @llvm.amdgcn.ds.load.tr16.b128.v8i16(ptr addrspace(3) %[[DS_PTR]])
+  // CHECK: call <8 x half> @llvm.amdgcn.ds.load.tr16.b128.v8f16(ptr addrspace(3) %[[DS_PTR]])
+  // CHECK: call <8 x bfloat> @llvm.amdgcn.ds.load.tr16.b128.v8bf16(ptr addrspace(3) %[[DS_PTR]])
+
+  rocdl.global.load.tr4.b64 %gl_ptr : !llvm.ptr<1> -> vector<2xi32>
+  rocdl.global.load.tr.b64 %gl_ptr : !llvm.ptr<1> -> vector<2xi32>
+  rocdl.global.load.tr6.b96 %gl_ptr : !llvm.ptr<1> -> vector<3xi32>
+  rocdl.global.load.tr.b128 %gl_ptr : !llvm.ptr<1> -> vector<8xi16>
+  rocdl.global.load.tr.b128 %gl_ptr : !llvm.ptr<1> -> vector<8xf16>
+  rocdl.global.load.tr.b128 %gl_ptr : !llvm.ptr<1> -> vector<8xbf16>
+
+  rocdl.ds.load.tr4.b64 %ds_ptr : !llvm.ptr<3> -> vector<2xi32>
+  rocdl.ds.load.tr8.b64 %ds_ptr : !llvm.ptr<3> -> vector<2xi32>
+  rocdl.ds.load.tr6.b96 %ds_ptr : !llvm.ptr<3> -> vector<3xi32>
+  rocdl.ds.load.tr16.b128 %ds_ptr : !llvm.ptr<3> -> vector<8xi16>
+  rocdl.ds.load.tr16.b128 %ds_ptr : !llvm.ptr<3> -> vector<8xf16>
+  rocdl.ds.load.tr16.b128 %ds_ptr : !llvm.ptr<3> -> vector<8xbf16>
+  llvm.return
+}
+
 llvm.func @rocdl.load.to.lds(%src : !llvm.ptr<7>, %dst: !llvm.ptr<3>) {
   //CHECK: call void @llvm.amdgcn.load.to.lds.p7
   rocdl.load.to.lds %src, %dst, 4, 0, 0 : !llvm.ptr<7>
diff --git a/mlir/test/lib/Dialect/Test/TestOpDefs.cpp b/mlir/test/lib/Dialect/Test/TestOpDefs.cpp
index e21cf94f84b66..c153211c68f92 100644
--- a/mlir/test/lib/Dialect/Test/TestOpDefs.cpp
+++ b/mlir/test/lib/Dialect/Test/TestOpDefs.cpp
@@ -320,10 +320,10 @@ LogicalResult OpWithResultShapeInterfaceOp::reifyReturnTypeShapes(
 }
 
 //===----------------------------------------------------------------------===//
-// OpWithResultShapePerDimInterfaceOp
+// ReifyShapedTypeUsingReifyResultShapesOp
 //===----------------------------------------------------------------------===//
 
-LogicalResult OpWithResultShapePerDimInterfaceOp::reifyResultShapes(
+LogicalResult ReifyShapedTypeUsingReifyResultShapesOp::reifyResultShapes(
     OpBuilder &builder, ReifiedRankedShapedTypeDims &shapes) {
   Location loc = getLoc();
   shapes.reserve(getNumOperands());
@@ -344,6 +344,103 @@ LogicalResult OpWithResultShapePerDimInterfaceOp::reifyResultShapes(
   return success();
 }
 
+//===----------------------------------------------------------------------===//
+// ReifyShapedTypeUsingReifyShapeOfResultOp
+//===----------------------------------------------------------------------===//
+
+LogicalResult ReifyShapedTypeUsingReifyShapeOfResultOp::reifyResultShapes(
+    OpBuilder &builder, ReifiedRankedShapedTypeDims &shapes) {
+  return failure();
+}
+
+FailureOr<SmallVector<OpFoldResult>>
+ReifyShapedTypeUsingReifyShapeOfResultOp::reifyShapeOfResult(OpBuilder &builder,
+                                                             int resultIndex) {
+  Location loc = getLoc();
+  Value sourceOperand = getOperand(getNumOperands() - 1 - resultIndex);
+  SmallVector<OpFoldResult> shape =
+      tensor::getMixedSizes(builder, loc, sourceOperand);
+  return shape;
+}
+
+//===----------------------------------------------------------------------===//
+// ReifyShapedTypeUsingReifyDimOfResultOp
+//===----------------------------------------------------------------------===//
+
+LogicalResult ReifyShapedTypeUsingReifyDimOfResultOp::reifyResultShapes(
+    OpBuilder &builder, ReifiedRankedShapedTypeDims &shapes) {
+  return failure();
+}
+
+FailureOr<SmallVector<OpFoldResult>>
+ReifyShapedTypeUsingReifyDimOfResultOp::reifyShapeOfResult(OpBuilder &builder,
+                                                           int resultIndex) {
+  return failure();
+}
+
+FailureOr<OpFoldResult>
+ReifyShapedTypeUsingReifyDimOfResultOp::reifyDimOfResult(OpBuilder &builder,
+                                                         int resultIndex,
+                                                         int dim) {
+  Location loc = getLoc();
+  Value sourceOperand = getOperand(getNumOperands() - 1 - resultIndex);
+  OpFoldResult shape = tensor::getMixedSize(builder, loc, sourceOperand, dim);
+  return shape;
+}
+
+//===----------------------------------------------------------------------===//
+// UnreifiableResultShapesOp
+//===----------------------------------------------------------------------===//
+
+LogicalResult UnreifiableResultShapesOp::reifyResultShapes(
+    OpBuilder &builder, ReifiedRankedShapedTypeDims &shapes) {
+  Location loc = getLoc();
+  shapes.resize(1);
+  shapes[0] = {tensor::getMixedSize(builder, loc, getOperand(), 0),
+               OpFoldResult()};
+  return success();
+}
+
+//===----------------------------------------------------------------------===//
+// UnreifiableResultShapeOp
+//===----------------------------------------------------------------------===//
+
+LogicalResult UnreifiableResultShapeOp::reifyResultShapes(
+    OpBuilder &builder, ReifiedRankedShapedTypeDims &shapes) {
+  return failure();
+}
+
+FailureOr<SmallVector<OpFoldResult>>
+UnreifiableResultShapeOp::reifyShapeOfResult(OpBuilder &builder,
+                                             int resultIndex) {
+  SmallVector<OpFoldResult> shape = {
+      tensor::getMixedSize(builder, getLoc(), getOperand(), 0), OpFoldResult()};
+  return shape;
+}
+
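+// Editorial sketch of the resolution order these test ops exercise (assuming
+// the resolve-shaped-type-result-dims patterns try the most specific hook
+// first and fall back on failure):
+//   1. reifyDimOfResult(builder, resultIndex, dim)  -- a single dimension
+//   2. reifyShapeOfResult(builder, resultIndex)     -- one result's shape
+//   3. reifyResultShapes(builder, shapes)           -- legacy: all results
+// Each Unreifiable* op provides exactly one working hook (the others fail)
+// and leaves dim 1 of its result unreifiable.
+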
+//===----------------------------------------------------------------------===//
+// UnreifiableDimOfResultShapeOp
+//===----------------------------------------------------------------------===//
+
+LogicalResult UnreifiableDimOfResultShapeOp::reifyResultShapes(
+    OpBuilder &builder, ReifiedRankedShapedTypeDims &shapes) {
+  return failure();
+}
+
+FailureOr<SmallVector<OpFoldResult>>
+UnreifiableDimOfResultShapeOp::reifyShapeOfResult(OpBuilder &builder,
+                                                  int resultIndex) {
+  return failure();
+}
+
+FailureOr<OpFoldResult>
+UnreifiableDimOfResultShapeOp::reifyDimOfResult(OpBuilder &builder,
+                                                int resultIndex, int dim) {
+  if (dim == 0)
+    return tensor::getMixedSize(builder, getLoc(), getOperand(), 0);
+  return failure();
+}
+
 //===----------------------------------------------------------------------===//
 // SideEffectOp
 //===----------------------------------------------------------------------===//
diff --git a/mlir/test/lib/Dialect/Test/TestOps.h b/mlir/test/lib/Dialect/Test/TestOps.h
index 4201ade9795e7..679274346fb13 100644
--- a/mlir/test/lib/Dialect/Test/TestOps.h
+++ b/mlir/test/lib/Dialect/Test/TestOps.h
@@ -42,6 +42,7 @@
 #include "mlir/Interfaces/ValueBoundsOpInterface.h"
 #include "mlir/Interfaces/ViewLikeInterface.h"
 #include "llvm/ADT/SetVector.h"
+#include "llvm/ADT/SmallVector.h"
 
 namespace test {
 class TestDialect;
diff --git a/mlir/test/lib/Dialect/Test/TestOps.td b/mlir/test/lib/Dialect/Test/TestOps.td
index 620d950c0d2af..cae0083f728e0 100644
--- a/mlir/test/lib/Dialect/Test/TestOps.td
+++ b/mlir/test/lib/Dialect/Test/TestOps.td
@@ -915,13 +915,97 @@ def OpWithResultShapeInterfaceOp : TEST_Op<"op_with_result_shape_interface",
   let results = (outs AnyRankedTensor:$result1, AnyRankedTensor:$result2);
 }
 
-def OpWithResultShapePerDimInterfaceOp :
-    TEST_Op<"op_with_result_shape_per_dim_interface",
-      [DeclareOpInterfaceMethods<ReifyRankedShapedTypeOpInterface>]> {
+def ReifyShapedTypeUsingReifyResultShapesOp :
+    TEST_Op<"reify_shaped_type_using_reify_result_shapes",
+      [DeclareOpInterfaceMethods<ReifyRankedShapedTypeOpInterface>]> {
+  let description = [{
+    Test that, when resolving a single dimension of a result, an operation
+    that implements neither `reifyShapeOfResult` nor `reifyDimOfResult` calls
+    into the implementation of `reifyResultShapes` to get the required value.
+    The op semantics is that the first result has the same shape as the second
+    operand and the second result has the same shape as the first operand.
+  }];
+  let arguments = (ins AnyRankedTensor:$operand1, AnyRankedTensor:$operand2);
+  let results = (outs AnyRankedTensor:$result1, AnyRankedTensor:$result2);
+}
+
+def ReifyShapedTypeUsingReifyShapeOfResultOp :
+    TEST_Op<"reify_shaped_type_using_reify_shape_of_result",
+      [DeclareOpInterfaceMethods<ReifyRankedShapedTypeOpInterface,
+        ["reifyShapeOfResult"]>]> {
+  let description = [{
+    Test that, when resolving a single dimension of a result, an operation
+    that doesn't implement `reifyDimOfResult` but implements
+    `reifyShapeOfResult` uses the latter to get the required value.
+    `reifyResultShapes` is implemented as a failure (which is also the default
+    implementation) to ensure it is not called. The op semantics is that the
+    first result has the same shape as the second operand and the second
+    result has the same shape as the first operand.
+  }];
   let arguments = (ins AnyRankedTensor:$operand1, AnyRankedTensor:$operand2);
   let results = (outs AnyRankedTensor:$result1, AnyRankedTensor:$result2);
 }
 
+def ReifyShapedTypeUsingReifyDimOfResultOp :
+    TEST_Op<"reify_shaped_type_using_reify_dim_of_result",
+      [DeclareOpInterfaceMethods<ReifyRankedShapedTypeOpInterface,
+        ["reifyShapeOfResult", "reifyDimOfResult"]>]> {
+  let description = [{
+    Test that, when resolving a single dimension of a result, an operation
+    that implements `reifyDimOfResult` uses it to get the required value.
+    `reifyResultShapes` and `reifyShapeOfResult` are implemented as failures
+    to ensure they are not called. The op semantics is that the first result
+    has the same shape as the second operand and the second result has the
+    same shape as the first operand.
+  }];
+  let arguments = (ins AnyRankedTensor:$operand1, AnyRankedTensor:$operand2);
+  let results = (outs AnyRankedTensor:$result1, AnyRankedTensor:$result2);
+}
+
+def UnreifiableResultShapesOp : TEST_Op<"unreifiable_result_shapes",
+    [DeclareOpInterfaceMethods<ReifyRankedShapedTypeOpInterface>]> {
+  let description = [{
+    Test handling of the case where some dimension of the result cannot be
+    reified. This tests the path where `reifyResultShapes` is implemented.
+
+    Expected that dim 0 of `result` is reifiable as dim 0 of `operand`, but
+    dim 1 of `result` is not reifiable.
+  }];
+  let arguments = (ins 2DTensorOf<[AnyType]>:$operand);
+  let results = (outs 2DTensorOf<[AnyType]>:$result);
+}
+
+def UnreifiableResultShapeOp : TEST_Op<"unreifiable_result_shape",
+    [DeclareOpInterfaceMethods<ReifyRankedShapedTypeOpInterface,
+      ["reifyShapeOfResult"]>]> {
+  let description = [{
+    Test handling of the case where some dimension of the result cannot be
+    reified. This tests the path where `reifyShapeOfResult` is implemented
+    but `reifyDimOfResult` is not, with `reifyResultShapes` implemented as a
+    failure.
+
+    Expected that dim 0 of `result` is reifiable as dim 0 of `operand`, but
+    dim 1 of `result` is not reifiable.
+  }];
+  let arguments = (ins 2DTensorOf<[AnyType]>:$operand);
+  let results = (outs 2DTensorOf<[AnyType]>:$result);
+}
+
+def UnreifiableDimOfResultShapeOp : TEST_Op<"unreifiable_dim_of_result_shape",
+    [DeclareOpInterfaceMethods<ReifyRankedShapedTypeOpInterface,
+      ["reifyShapeOfResult", "reifyDimOfResult"]>]> {
+  let description = [{
+    Test handling of the case where some dimension of the result cannot be
+    reified. This tests the path where `reifyDimOfResult` is implemented,
+    while `reifyShapeOfResult` and `reifyResultShapes` are implemented as
+    failures.
+
+    Expected that dim 0 of `result` is reifiable as dim 0 of `operand`, but
+    dim 1 of `result` is not reifiable.
+  }];
+  let arguments = (ins 2DTensorOf<[AnyType]>:$operand);
+  let results = (outs 2DTensorOf<[AnyType]>:$result);
+}
+
 def IsNotScalar : Constraint>;
 
 def UpdateAttr : Pat<(I32ElementsAttrOp $attr),
diff --git a/mlir/test/python/dialects/transform_xegpu_ext.py b/mlir/test/python/dialects/transform_xegpu_ext.py
index 1c8a2bcc6a2fb..0b587d2020aa6 100644
--- a/mlir/test/python/dialects/transform_xegpu_ext.py
+++ b/mlir/test/python/dialects/transform_xegpu_ext.py
@@ -3,7 +3,7 @@
 from mlir.ir import *
 from mlir.dialects import transform
 from mlir.dialects.transform import xegpu
-from mlir.dialects.transform import structured
+from mlir.dialects.transform import AnyValueType
 
 
 def run(f):
@@ -16,6 +16,21 @@ def run(f):
     return f
 
 
+@run
+def getDescOpDefaultIndex():
+    sequence = transform.SequenceOp(
+        transform.FailurePropagationMode.Propagate,
+        [],
+        transform.OperationType.get("xegpu.dpas"),
+    )
+    with InsertionPoint(sequence.body):
+        operand = transform.GetOperandOp(AnyValueType.get(), sequence.bodyTarget, [0])
+        desc_handle = xegpu.GetDescOp(operand)
+        transform.YieldOp()
+    # CHECK-LABEL: TEST: getDescOpDefaultIndex
+    # CHECK: transform.xegpu.get_desc_op %
+
+
 @run
 def setDescLayoutMinimal():
     sequence = transform.SequenceOp(
@@ -49,3 +64,52 @@ def setDescLayoutInstData():
     # CHECK: sg_layout = [6, 4]
     # CHECK: sg_data = [32, 16]
     # CHECK: inst_data = [8, 16]
+
+
+@run
+def setOpLayoutAttrOperandMinimal():
+    sequence = transform.SequenceOp(
+        transform.FailurePropagationMode.Propagate,
+        [],
+        transform.OperationType.get("xegpu.dpas"),
+    )
+    with InsertionPoint(sequence.body):
+        xegpu.SetOpLayoutAttrOp(
+            sequence.bodyTarget,
+            sg_layout=[6, 4],
+            sg_data=[32, 16],
+        )
+        transform.YieldOp()
+    # CHECK-LABEL: TEST: setOpLayoutAttrOperandMinimal
+    # CHECK: transform.xegpu.set_op_layout_attr %
+    # NO-CHECK: index = 0
+    # NO-CHECK: result
+    # CHECK: sg_layout = [6, 4]
+    # CHECK: sg_data = [32, 16]
+    # NO-CHECK: inst_data
+
+
+@run
+def setOpLayoutAttrResult():
+    sequence = transform.SequenceOp(
+        transform.FailurePropagationMode.Propagate,
+        [],
+        transform.OperationType.get("xegpu.dpas"),
+    )
+    with InsertionPoint(sequence.body):
+        xegpu.SetOpLayoutAttrOp(
+            sequence.bodyTarget,
+            index=0,
+            sg_layout=[6, 4],
+            sg_data=[32, 16],
+            inst_data=[8, 16],
+            result=True,
+        )
+        transform.YieldOp()
+    # CHECK-LABEL: TEST: setOpLayoutAttrResult
+    # CHECK: transform.xegpu.set_op_layout_attr %
+    # NO-CHECK: index = 0
+    # CHECK: result
+    # CHECK: sg_layout = [6, 4]
+    # CHECK: sg_data = [32, 16]
+    # CHECK: inst_data = [8, 16]
diff --git a/offload/test/offloading/gpupgo/pgo_atomic_teams.c b/offload/test/offloading/gpupgo/pgo_atomic_teams.c
index 42d8ae43beba1..b3b72db080392 100644
--- a/offload/test/offloading/gpupgo/pgo_atomic_teams.c
+++ b/offload/test/offloading/gpupgo/pgo_atomic_teams.c
@@ -18,7 +18,6 @@
 
 // REQUIRES: amdgpu
 // REQUIRES: pgo
-// XFAIL: amdgpu
 
 int test1(int a) { return a / 2; }
 int test2(int a) { return a * 2; }
diff --git a/offload/test/offloading/gpupgo/pgo_atomic_threads.c b/offload/test/offloading/gpupgo/pgo_atomic_threads.c
index 09a4dc1577822..440a6b533317d 100644
--- a/offload/test/offloading/gpupgo/pgo_atomic_threads.c
+++ b/offload/test/offloading/gpupgo/pgo_atomic_threads.c
@@ -18,7 +18,6 @@
 
 // REQUIRES: amdgpu
 // REQUIRES: pgo
-// XFAIL: amdgpu
 
 int test1(int a) { return a / 2; }
diff --git a/offload/test/offloading/gpupgo/pgo_device_and_host.c b/offload/test/offloading/gpupgo/pgo_device_and_host.c
index c53e69a25e50d..3e95791ce9a50 100644
--- a/offload/test/offloading/gpupgo/pgo_device_and_host.c
+++ b/offload/test/offloading/gpupgo/pgo_device_and_host.c
@@ -50,7 +50,6 @@
 
 // REQUIRES: amdgpu
 // REQUIRES: pgo
-// XFAIL: amdgpu
 
 int main() {
   int host_var = 0;
diff --git a/offload/test/offloading/gpupgo/pgo_device_only.c b/offload/test/offloading/gpupgo/pgo_device_only.c
index 644df6e7b0339..2939af613b6dd 100644
--- a/offload/test/offloading/gpupgo/pgo_device_only.c
+++ b/offload/test/offloading/gpupgo/pgo_device_only.c
@@ -16,7 +16,6 @@
 
 // REQUIRES: amdgpu
 // REQUIRES: pgo
-// XFAIL: amdgpu
 
 int test1(int a) { return a / 2; }
 int test2(int a) { return a * 2; }
diff --git a/revert_patches.txt b/revert_patches.txt
index 9e465ba90ae6a..b88b846f64b68 100644
--- a/revert_patches.txt
+++ b/revert_patches.txt
@@ -5,3 +5,6 @@ d57230c7 [AMDGPU][MC] Disallow op_sel in some VOP3P dot instructions (#100485)
 breaks build of ROCmValidationSuite
 [C2y] Support WG14 N3457, the __COUNTER__ macro (#162662)
 ---
+breaks fortran declare-target-link1
+[OMPIRBuilder] Fix addrspace of internal critical section lock (#166459)
+---
diff --git a/utils/bazel/llvm-project-overlay/llvm/BUILD.bazel b/utils/bazel/llvm-project-overlay/llvm/BUILD.bazel
index 3e7719c0d03c7..67c397e34b8c7 100644
--- a/utils/bazel/llvm-project-overlay/llvm/BUILD.bazel
+++ b/utils/bazel/llvm-project-overlay/llvm/BUILD.bazel
@@ -1729,6 +1729,7 @@ cc_library(
     copts = llvm_copts,
     deps = [
         ":Analysis",
+        ":Core",
         ":ProfileData",
         ":Support",
         ":TargetParser",
diff --git a/utils/bazel/llvm-project-overlay/mlir/BUILD.bazel b/utils/bazel/llvm-project-overlay/mlir/BUILD.bazel
index a4ea627fb3d16..7066f498c7d49 100644
--- a/utils/bazel/llvm-project-overlay/mlir/BUILD.bazel
+++ b/utils/bazel/llvm-project-overlay/mlir/BUILD.bazel
@@ -3888,6 +3888,7 @@ cc_library(
         ":XeGPUDialect",
         ":XeGPUTransformOpsIncGen",
         ":XeGPUUtils",
+        "//llvm:Support",
     ],
 )