1use crate::{2 core_arch::{simd::*, x86::*},3 intrinsics::simd::*,4 ptr,5};67#[cfg(test)]8use stdarch_test::assert_instr;910/// Compute the absolute value of packed signed 16-bit integers in a, and store the unsigned results in dst.11///12/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_abs_epi16&expand=30)13#[inline]14#[target_feature(enable = "avx512bw")]15#[stable(feature = "stdarch_x86_avx512", since = "1.89")]16#[cfg_attr(test, assert_instr(vpabsw))]17#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]18pub const fn _mm512_abs_epi16(a: __m512i) -> __m512i {19 unsafe {20 let a = a.as_i16x32();21 let cmp: i16x32 = simd_gt(a, i16x32::ZERO);22 transmute(simd_select(cmp, a, simd_neg(a)))23 }24}2526/// Compute the absolute value of packed signed 16-bit integers in a, and store the unsigned results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).27///28/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_abs_epi16&expand=31)29#[inline]30#[target_feature(enable = "avx512bw")]31#[stable(feature = "stdarch_x86_avx512", since = "1.89")]32#[cfg_attr(test, assert_instr(vpabsw))]33#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]34pub const fn _mm512_mask_abs_epi16(src: __m512i, k: __mmask32, a: __m512i) -> __m512i {35 unsafe {36 let abs = _mm512_abs_epi16(a).as_i16x32();37 transmute(simd_select_bitmask(k, abs, src.as_i16x32()))38 }39}4041/// Compute the absolute value of packed signed 16-bit integers in a, and store the unsigned results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).42///43/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_abs_epi16&expand=32)44#[inline]45#[target_feature(enable = "avx512bw")]46#[stable(feature = 
"stdarch_x86_avx512", since = "1.89")]47#[cfg_attr(test, assert_instr(vpabsw))]48#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]49pub const fn _mm512_maskz_abs_epi16(k: __mmask32, a: __m512i) -> __m512i {50 unsafe {51 let abs = _mm512_abs_epi16(a).as_i16x32();52 transmute(simd_select_bitmask(k, abs, i16x32::ZERO))53 }54}5556/// Compute the absolute value of packed signed 16-bit integers in a, and store the unsigned results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).57///58/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_abs_epi16&expand=28)59#[inline]60#[target_feature(enable = "avx512bw,avx512vl")]61#[stable(feature = "stdarch_x86_avx512", since = "1.89")]62#[cfg_attr(test, assert_instr(vpabsw))]63#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]64pub const fn _mm256_mask_abs_epi16(src: __m256i, k: __mmask16, a: __m256i) -> __m256i {65 unsafe {66 let abs = _mm256_abs_epi16(a).as_i16x16();67 transmute(simd_select_bitmask(k, abs, src.as_i16x16()))68 }69}7071/// Compute the absolute value of packed signed 16-bit integers in a, and store the unsigned results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).72///73/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_abs_epi16&expand=29)74#[inline]75#[target_feature(enable = "avx512bw,avx512vl")]76#[stable(feature = "stdarch_x86_avx512", since = "1.89")]77#[cfg_attr(test, assert_instr(vpabsw))]78#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]79pub const fn _mm256_maskz_abs_epi16(k: __mmask16, a: __m256i) -> __m256i {80 unsafe {81 let abs = _mm256_abs_epi16(a).as_i16x16();82 transmute(simd_select_bitmask(k, abs, i16x16::ZERO))83 }84}8586/// Compute the absolute value of packed signed 16-bit integers in a, and store the 
unsigned results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).87///88/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_abs_epi16&expand=25)89#[inline]90#[target_feature(enable = "avx512bw,avx512vl")]91#[stable(feature = "stdarch_x86_avx512", since = "1.89")]92#[cfg_attr(test, assert_instr(vpabsw))]93#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]94pub const fn _mm_mask_abs_epi16(src: __m128i, k: __mmask8, a: __m128i) -> __m128i {95 unsafe {96 let abs = _mm_abs_epi16(a).as_i16x8();97 transmute(simd_select_bitmask(k, abs, src.as_i16x8()))98 }99}100101/// Compute the absolute value of packed signed 16-bit integers in a, and store the unsigned results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).102///103/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_abs_epi16&expand=26)104#[inline]105#[target_feature(enable = "avx512bw,avx512vl")]106#[stable(feature = "stdarch_x86_avx512", since = "1.89")]107#[cfg_attr(test, assert_instr(vpabsw))]108#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]109pub const fn _mm_maskz_abs_epi16(k: __mmask8, a: __m128i) -> __m128i {110 unsafe {111 let abs = _mm_abs_epi16(a).as_i16x8();112 transmute(simd_select_bitmask(k, abs, i16x8::ZERO))113 }114}115116/// Compute the absolute value of packed signed 8-bit integers in a, and store the unsigned results in dst.117///118/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_abs_epi8&expand=57)119#[inline]120#[target_feature(enable = "avx512bw")]121#[stable(feature = "stdarch_x86_avx512", since = "1.89")]122#[cfg_attr(test, assert_instr(vpabsb))]123#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]124pub const fn _mm512_abs_epi8(a: __m512i) -> 
__m512i {125 unsafe {126 let a = a.as_i8x64();127 let cmp: i8x64 = simd_gt(a, i8x64::ZERO);128 transmute(simd_select(cmp, a, simd_neg(a)))129 }130}131132/// Compute the absolute value of packed signed 8-bit integers in a, and store the unsigned results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).133///134/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_abs_epi8&expand=58)135#[inline]136#[target_feature(enable = "avx512bw")]137#[stable(feature = "stdarch_x86_avx512", since = "1.89")]138#[cfg_attr(test, assert_instr(vpabsb))]139#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]140pub const fn _mm512_mask_abs_epi8(src: __m512i, k: __mmask64, a: __m512i) -> __m512i {141 unsafe {142 let abs = _mm512_abs_epi8(a).as_i8x64();143 transmute(simd_select_bitmask(k, abs, src.as_i8x64()))144 }145}146147/// Compute the absolute value of packed signed 8-bit integers in a, and store the unsigned results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).148///149/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_abs_epi8&expand=59)150#[inline]151#[target_feature(enable = "avx512bw")]152#[stable(feature = "stdarch_x86_avx512", since = "1.89")]153#[cfg_attr(test, assert_instr(vpabsb))]154#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]155pub const fn _mm512_maskz_abs_epi8(k: __mmask64, a: __m512i) -> __m512i {156 unsafe {157 let abs = _mm512_abs_epi8(a).as_i8x64();158 transmute(simd_select_bitmask(k, abs, i8x64::ZERO))159 }160}161162/// Compute the absolute value of packed signed 8-bit integers in a, and store the unsigned results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).163///164/// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_abs_epi8&expand=55)165#[inline]166#[target_feature(enable = "avx512bw,avx512vl")]167#[stable(feature = "stdarch_x86_avx512", since = "1.89")]168#[cfg_attr(test, assert_instr(vpabsb))]169#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]170pub const fn _mm256_mask_abs_epi8(src: __m256i, k: __mmask32, a: __m256i) -> __m256i {171 unsafe {172 let abs = _mm256_abs_epi8(a).as_i8x32();173 transmute(simd_select_bitmask(k, abs, src.as_i8x32()))174 }175}176177/// Compute the absolute value of packed signed 8-bit integers in a, and store the unsigned results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).178///179/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_abs_epi8&expand=56)180#[inline]181#[target_feature(enable = "avx512bw,avx512vl")]182#[stable(feature = "stdarch_x86_avx512", since = "1.89")]183#[cfg_attr(test, assert_instr(vpabsb))]184#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]185pub const fn _mm256_maskz_abs_epi8(k: __mmask32, a: __m256i) -> __m256i {186 unsafe {187 let abs = _mm256_abs_epi8(a).as_i8x32();188 transmute(simd_select_bitmask(k, abs, i8x32::ZERO))189 }190}191192/// Compute the absolute value of packed signed 8-bit integers in a, and store the unsigned results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set)193///194/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_abs_epi8&expand=52)195#[inline]196#[target_feature(enable = "avx512bw,avx512vl")]197#[stable(feature = "stdarch_x86_avx512", since = "1.89")]198#[cfg_attr(test, assert_instr(vpabsb))]199#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]200pub const fn _mm_mask_abs_epi8(src: __m128i, k: 
__mmask16, a: __m128i) -> __m128i {201 unsafe {202 let abs = _mm_abs_epi8(a).as_i8x16();203 transmute(simd_select_bitmask(k, abs, src.as_i8x16()))204 }205}206207/// Compute the absolute value of packed signed 8-bit integers in a, and store the unsigned results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).208///209/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_abs_epi8&expand=53)210#[inline]211#[target_feature(enable = "avx512bw,avx512vl")]212#[stable(feature = "stdarch_x86_avx512", since = "1.89")]213#[cfg_attr(test, assert_instr(vpabsb))]214#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]215pub const fn _mm_maskz_abs_epi8(k: __mmask16, a: __m128i) -> __m128i {216 unsafe {217 let abs = _mm_abs_epi8(a).as_i8x16();218 transmute(simd_select_bitmask(k, abs, i8x16::ZERO))219 }220}221222/// Add packed 16-bit integers in a and b, and store the results in dst.223///224/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_add_epi16&expand=91)225#[inline]226#[target_feature(enable = "avx512bw")]227#[stable(feature = "stdarch_x86_avx512", since = "1.89")]228#[cfg_attr(test, assert_instr(vpaddw))]229#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]230pub const fn _mm512_add_epi16(a: __m512i, b: __m512i) -> __m512i {231 unsafe { transmute(simd_add(a.as_i16x32(), b.as_i16x32())) }232}233234/// Add packed 16-bit integers in a and b, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).235///236/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_add_epi16&expand=92)237#[inline]238#[target_feature(enable = "avx512bw")]239#[stable(feature = "stdarch_x86_avx512", since = "1.89")]240#[cfg_attr(test, 
assert_instr(vpaddw))]241#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]242pub const fn _mm512_mask_add_epi16(src: __m512i, k: __mmask32, a: __m512i, b: __m512i) -> __m512i {243 unsafe {244 let add = _mm512_add_epi16(a, b).as_i16x32();245 transmute(simd_select_bitmask(k, add, src.as_i16x32()))246 }247}248249/// Add packed 16-bit integers in a and b, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).250///251/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_add_epi16&expand=93)252#[inline]253#[target_feature(enable = "avx512bw")]254#[stable(feature = "stdarch_x86_avx512", since = "1.89")]255#[cfg_attr(test, assert_instr(vpaddw))]256#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]257pub const fn _mm512_maskz_add_epi16(k: __mmask32, a: __m512i, b: __m512i) -> __m512i {258 unsafe {259 let add = _mm512_add_epi16(a, b).as_i16x32();260 transmute(simd_select_bitmask(k, add, i16x32::ZERO))261 }262}263264/// Add packed 16-bit integers in a and b, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).265///266/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_add_epi16&expand=89)267#[inline]268#[target_feature(enable = "avx512bw,avx512vl")]269#[stable(feature = "stdarch_x86_avx512", since = "1.89")]270#[cfg_attr(test, assert_instr(vpaddw))]271#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]272pub const fn _mm256_mask_add_epi16(src: __m256i, k: __mmask16, a: __m256i, b: __m256i) -> __m256i {273 unsafe {274 let add = _mm256_add_epi16(a, b).as_i16x16();275 transmute(simd_select_bitmask(k, add, src.as_i16x16()))276 }277}278279/// Add packed 16-bit integers in a and b, and store the results in dst using zeromask k (elements are zeroed out when 
the corresponding mask bit is not set).280///281/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_add_epi16&expand=90)282#[inline]283#[target_feature(enable = "avx512bw,avx512vl")]284#[stable(feature = "stdarch_x86_avx512", since = "1.89")]285#[cfg_attr(test, assert_instr(vpaddw))]286#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]287pub const fn _mm256_maskz_add_epi16(k: __mmask16, a: __m256i, b: __m256i) -> __m256i {288 unsafe {289 let add = _mm256_add_epi16(a, b).as_i16x16();290 transmute(simd_select_bitmask(k, add, i16x16::ZERO))291 }292}293294/// Add packed 16-bit integers in a and b, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).295///296/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_add_epi16&expand=86)297#[inline]298#[target_feature(enable = "avx512bw,avx512vl")]299#[stable(feature = "stdarch_x86_avx512", since = "1.89")]300#[cfg_attr(test, assert_instr(vpaddw))]301#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]302pub const fn _mm_mask_add_epi16(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> __m128i {303 unsafe {304 let add = _mm_add_epi16(a, b).as_i16x8();305 transmute(simd_select_bitmask(k, add, src.as_i16x8()))306 }307}308309/// Add packed 16-bit integers in a and b, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).310///311/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_add_epi16&expand=87)312#[inline]313#[target_feature(enable = "avx512bw,avx512vl")]314#[stable(feature = "stdarch_x86_avx512", since = "1.89")]315#[cfg_attr(test, assert_instr(vpaddw))]316#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]317pub const fn 
_mm_maskz_add_epi16(k: __mmask8, a: __m128i, b: __m128i) -> __m128i {318 unsafe {319 let add = _mm_add_epi16(a, b).as_i16x8();320 transmute(simd_select_bitmask(k, add, i16x8::ZERO))321 }322}323324/// Add packed 8-bit integers in a and b, and store the results in dst.325///326/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_add_epi8&expand=118)327#[inline]328#[target_feature(enable = "avx512bw")]329#[stable(feature = "stdarch_x86_avx512", since = "1.89")]330#[cfg_attr(test, assert_instr(vpaddb))]331#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]332pub const fn _mm512_add_epi8(a: __m512i, b: __m512i) -> __m512i {333 unsafe { transmute(simd_add(a.as_i8x64(), b.as_i8x64())) }334}335336/// Add packed 8-bit integers in a and b, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).337///338/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_add_epi8&expand=119)339#[inline]340#[target_feature(enable = "avx512bw")]341#[stable(feature = "stdarch_x86_avx512", since = "1.89")]342#[cfg_attr(test, assert_instr(vpaddb))]343#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]344pub const fn _mm512_mask_add_epi8(src: __m512i, k: __mmask64, a: __m512i, b: __m512i) -> __m512i {345 unsafe {346 let add = _mm512_add_epi8(a, b).as_i8x64();347 transmute(simd_select_bitmask(k, add, src.as_i8x64()))348 }349}350351/// Add packed 8-bit integers in a and b, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).352///353/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_add_epi8&expand=120)354#[inline]355#[target_feature(enable = "avx512bw")]356#[stable(feature = "stdarch_x86_avx512", since = "1.89")]357#[cfg_attr(test, 
assert_instr(vpaddb))]358#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]359pub const fn _mm512_maskz_add_epi8(k: __mmask64, a: __m512i, b: __m512i) -> __m512i {360 unsafe {361 let add = _mm512_add_epi8(a, b).as_i8x64();362 transmute(simd_select_bitmask(k, add, i8x64::ZERO))363 }364}365366/// Add packed 8-bit integers in a and b, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).367///368/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_add_epi8&expand=116)369#[inline]370#[target_feature(enable = "avx512bw,avx512vl")]371#[stable(feature = "stdarch_x86_avx512", since = "1.89")]372#[cfg_attr(test, assert_instr(vpaddb))]373#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]374pub const fn _mm256_mask_add_epi8(src: __m256i, k: __mmask32, a: __m256i, b: __m256i) -> __m256i {375 unsafe {376 let add = _mm256_add_epi8(a, b).as_i8x32();377 transmute(simd_select_bitmask(k, add, src.as_i8x32()))378 }379}380381/// Add packed 8-bit integers in a and b, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).382///383/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_add_epi8&expand=117)384#[inline]385#[target_feature(enable = "avx512bw,avx512vl")]386#[stable(feature = "stdarch_x86_avx512", since = "1.89")]387#[cfg_attr(test, assert_instr(vpaddb))]388#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]389pub const fn _mm256_maskz_add_epi8(k: __mmask32, a: __m256i, b: __m256i) -> __m256i {390 unsafe {391 let add = _mm256_add_epi8(a, b).as_i8x32();392 transmute(simd_select_bitmask(k, add, i8x32::ZERO))393 }394}395396/// Add packed 8-bit integers in a and b, and store the results in dst using writemask k (elements are copied from src when the corresponding 
mask bit is not set).397///398/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_add_epi8&expand=113)399#[inline]400#[target_feature(enable = "avx512bw,avx512vl")]401#[stable(feature = "stdarch_x86_avx512", since = "1.89")]402#[cfg_attr(test, assert_instr(vpaddb))]403#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]404pub const fn _mm_mask_add_epi8(src: __m128i, k: __mmask16, a: __m128i, b: __m128i) -> __m128i {405 unsafe {406 let add = _mm_add_epi8(a, b).as_i8x16();407 transmute(simd_select_bitmask(k, add, src.as_i8x16()))408 }409}410411/// Add packed 8-bit integers in a and b, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).412///413/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_add_epi8&expand=114)414#[inline]415#[target_feature(enable = "avx512bw,avx512vl")]416#[stable(feature = "stdarch_x86_avx512", since = "1.89")]417#[cfg_attr(test, assert_instr(vpaddb))]418#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]419pub const fn _mm_maskz_add_epi8(k: __mmask16, a: __m128i, b: __m128i) -> __m128i {420 unsafe {421 let add = _mm_add_epi8(a, b).as_i8x16();422 transmute(simd_select_bitmask(k, add, i8x16::ZERO))423 }424}425426/// Add packed unsigned 16-bit integers in a and b using saturation, and store the results in dst.427///428/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_adds_epu16&expand=197)429#[inline]430#[target_feature(enable = "avx512bw")]431#[stable(feature = "stdarch_x86_avx512", since = "1.89")]432#[cfg_attr(test, assert_instr(vpaddusw))]433#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]434pub const fn _mm512_adds_epu16(a: __m512i, b: __m512i) -> __m512i {435 unsafe { transmute(simd_saturating_add(a.as_u16x32(), 
b.as_u16x32())) }436}437438/// Add packed unsigned 16-bit integers in a and b using saturation, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).439///440/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_adds_epu16&expand=198)441#[inline]442#[target_feature(enable = "avx512bw")]443#[stable(feature = "stdarch_x86_avx512", since = "1.89")]444#[cfg_attr(test, assert_instr(vpaddusw))]445#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]446pub const fn _mm512_mask_adds_epu16(src: __m512i, k: __mmask32, a: __m512i, b: __m512i) -> __m512i {447 unsafe {448 let add = _mm512_adds_epu16(a, b).as_u16x32();449 transmute(simd_select_bitmask(k, add, src.as_u16x32()))450 }451}452453/// Add packed unsigned 16-bit integers in a and b using saturation, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).454///455/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_adds_epu16&expand=199)456#[inline]457#[target_feature(enable = "avx512bw")]458#[stable(feature = "stdarch_x86_avx512", since = "1.89")]459#[cfg_attr(test, assert_instr(vpaddusw))]460#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]461pub const fn _mm512_maskz_adds_epu16(k: __mmask32, a: __m512i, b: __m512i) -> __m512i {462 unsafe {463 let add = _mm512_adds_epu16(a, b).as_u16x32();464 transmute(simd_select_bitmask(k, add, u16x32::ZERO))465 }466}467468/// Add packed unsigned 16-bit integers in a and b using saturation, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).469///470/// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_adds_epu16&expand=195)471#[inline]472#[target_feature(enable = "avx512bw,avx512vl")]473#[stable(feature = "stdarch_x86_avx512", since = "1.89")]474#[cfg_attr(test, assert_instr(vpaddusw))]475#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]476pub const fn _mm256_mask_adds_epu16(src: __m256i, k: __mmask16, a: __m256i, b: __m256i) -> __m256i {477 unsafe {478 let add = _mm256_adds_epu16(a, b).as_u16x16();479 transmute(simd_select_bitmask(k, add, src.as_u16x16()))480 }481}482483/// Add packed unsigned 16-bit integers in a and b using saturation, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).484///485/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_adds_epu16&expand=196)486#[inline]487#[target_feature(enable = "avx512bw,avx512vl")]488#[stable(feature = "stdarch_x86_avx512", since = "1.89")]489#[cfg_attr(test, assert_instr(vpaddusw))]490#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]491pub const fn _mm256_maskz_adds_epu16(k: __mmask16, a: __m256i, b: __m256i) -> __m256i {492 unsafe {493 let add = _mm256_adds_epu16(a, b).as_u16x16();494 transmute(simd_select_bitmask(k, add, u16x16::ZERO))495 }496}497498/// Add packed unsigned 16-bit integers in a and b using saturation, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).499///500/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_adds_epu16&expand=192)501#[inline]502#[target_feature(enable = "avx512bw,avx512vl")]503#[stable(feature = "stdarch_x86_avx512", since = "1.89")]504#[cfg_attr(test, assert_instr(vpaddusw))]505#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]506pub const fn 
_mm_mask_adds_epu16(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> __m128i {507 unsafe {508 let add = _mm_adds_epu16(a, b).as_u16x8();509 transmute(simd_select_bitmask(k, add, src.as_u16x8()))510 }511}512513/// Add packed unsigned 16-bit integers in a and b using saturation, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).514///515/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_adds_epu16&expand=193)516#[inline]517#[target_feature(enable = "avx512bw,avx512vl")]518#[stable(feature = "stdarch_x86_avx512", since = "1.89")]519#[cfg_attr(test, assert_instr(vpaddusw))]520#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]521pub const fn _mm_maskz_adds_epu16(k: __mmask8, a: __m128i, b: __m128i) -> __m128i {522 unsafe {523 let add = _mm_adds_epu16(a, b).as_u16x8();524 transmute(simd_select_bitmask(k, add, u16x8::ZERO))525 }526}527528/// Add packed unsigned 8-bit integers in a and b using saturation, and store the results in dst.529///530/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_adds_epu8&expand=206)531#[inline]532#[target_feature(enable = "avx512bw")]533#[stable(feature = "stdarch_x86_avx512", since = "1.89")]534#[cfg_attr(test, assert_instr(vpaddusb))]535#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]536pub const fn _mm512_adds_epu8(a: __m512i, b: __m512i) -> __m512i {537 unsafe { transmute(simd_saturating_add(a.as_u8x64(), b.as_u8x64())) }538}539540/// Add packed unsigned 8-bit integers in a and b using saturation, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).541///542/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_adds_epu8&expand=207)543#[inline]544#[target_feature(enable 
= "avx512bw")]545#[stable(feature = "stdarch_x86_avx512", since = "1.89")]546#[cfg_attr(test, assert_instr(vpaddusb))]547#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]548pub const fn _mm512_mask_adds_epu8(src: __m512i, k: __mmask64, a: __m512i, b: __m512i) -> __m512i {549 unsafe {550 let add = _mm512_adds_epu8(a, b).as_u8x64();551 transmute(simd_select_bitmask(k, add, src.as_u8x64()))552 }553}554555/// Add packed unsigned 8-bit integers in a and b using saturation, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).556///557/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_adds_epu8&expand=208)558#[inline]559#[target_feature(enable = "avx512bw")]560#[stable(feature = "stdarch_x86_avx512", since = "1.89")]561#[cfg_attr(test, assert_instr(vpaddusb))]562#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]563pub const fn _mm512_maskz_adds_epu8(k: __mmask64, a: __m512i, b: __m512i) -> __m512i {564 unsafe {565 let add = _mm512_adds_epu8(a, b).as_u8x64();566 transmute(simd_select_bitmask(k, add, u8x64::ZERO))567 }568}569570/// Add packed unsigned 8-bit integers in a and b using saturation, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).571///572/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_adds_epu8&expand=204)573#[inline]574#[target_feature(enable = "avx512bw,avx512vl")]575#[stable(feature = "stdarch_x86_avx512", since = "1.89")]576#[cfg_attr(test, assert_instr(vpaddusb))]577#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]578pub const fn _mm256_mask_adds_epu8(src: __m256i, k: __mmask32, a: __m256i, b: __m256i) -> __m256i {579 unsafe {580 let add = _mm256_adds_epu8(a, b).as_u8x32();581 transmute(simd_select_bitmask(k, add, 
src.as_u8x32()))582 }583}584585/// Add packed unsigned 8-bit integers in a and b using saturation, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).586///587/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_adds_epu8&expand=205)588#[inline]589#[target_feature(enable = "avx512bw,avx512vl")]590#[stable(feature = "stdarch_x86_avx512", since = "1.89")]591#[cfg_attr(test, assert_instr(vpaddusb))]592#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]593pub const fn _mm256_maskz_adds_epu8(k: __mmask32, a: __m256i, b: __m256i) -> __m256i {594 unsafe {595 let add = _mm256_adds_epu8(a, b).as_u8x32();596 transmute(simd_select_bitmask(k, add, u8x32::ZERO))597 }598}599600/// Add packed unsigned 8-bit integers in a and b using saturation, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).601///602/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_adds_epu8&expand=201)603#[inline]604#[target_feature(enable = "avx512bw,avx512vl")]605#[stable(feature = "stdarch_x86_avx512", since = "1.89")]606#[cfg_attr(test, assert_instr(vpaddusb))]607#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]608pub const fn _mm_mask_adds_epu8(src: __m128i, k: __mmask16, a: __m128i, b: __m128i) -> __m128i {609 unsafe {610 let add = _mm_adds_epu8(a, b).as_u8x16();611 transmute(simd_select_bitmask(k, add, src.as_u8x16()))612 }613}614615/// Add packed unsigned 8-bit integers in a and b using saturation, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).616///617/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_adds_epu8&expand=202)618#[inline]619#[target_feature(enable = 
"avx512bw,avx512vl")]620#[stable(feature = "stdarch_x86_avx512", since = "1.89")]621#[cfg_attr(test, assert_instr(vpaddusb))]622#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]623pub const fn _mm_maskz_adds_epu8(k: __mmask16, a: __m128i, b: __m128i) -> __m128i {624 unsafe {625 let add = _mm_adds_epu8(a, b).as_u8x16();626 transmute(simd_select_bitmask(k, add, u8x16::ZERO))627 }628}629630/// Add packed signed 16-bit integers in a and b using saturation, and store the results in dst.631///632/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_adds_epi16&expand=179)633#[inline]634#[target_feature(enable = "avx512bw")]635#[stable(feature = "stdarch_x86_avx512", since = "1.89")]636#[cfg_attr(test, assert_instr(vpaddsw))]637#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]638pub const fn _mm512_adds_epi16(a: __m512i, b: __m512i) -> __m512i {639 unsafe { transmute(simd_saturating_add(a.as_i16x32(), b.as_i16x32())) }640}641642/// Add packed signed 16-bit integers in a and b using saturation, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).643///644/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_adds_epi16&expand=180)645#[inline]646#[target_feature(enable = "avx512bw")]647#[stable(feature = "stdarch_x86_avx512", since = "1.89")]648#[cfg_attr(test, assert_instr(vpaddsw))]649#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]650pub const fn _mm512_mask_adds_epi16(src: __m512i, k: __mmask32, a: __m512i, b: __m512i) -> __m512i {651 unsafe {652 let add = _mm512_adds_epi16(a, b).as_i16x32();653 transmute(simd_select_bitmask(k, add, src.as_i16x32()))654 }655}656657/// Add packed signed 16-bit integers in a and b using saturation, and store the results in dst using zeromask k (elements are zeroed out when the 
corresponding mask bit is not set).658///659/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_adds_epi16&expand=181)660#[inline]661#[target_feature(enable = "avx512bw")]662#[stable(feature = "stdarch_x86_avx512", since = "1.89")]663#[cfg_attr(test, assert_instr(vpaddsw))]664#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]665pub const fn _mm512_maskz_adds_epi16(k: __mmask32, a: __m512i, b: __m512i) -> __m512i {666 unsafe {667 let add = _mm512_adds_epi16(a, b).as_i16x32();668 transmute(simd_select_bitmask(k, add, i16x32::ZERO))669 }670}671672/// Add packed signed 16-bit integers in a and b using saturation, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).673///674/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_adds_epi16&expand=177)675#[inline]676#[target_feature(enable = "avx512bw,avx512vl")]677#[stable(feature = "stdarch_x86_avx512", since = "1.89")]678#[cfg_attr(test, assert_instr(vpaddsw))]679#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]680pub const fn _mm256_mask_adds_epi16(src: __m256i, k: __mmask16, a: __m256i, b: __m256i) -> __m256i {681 unsafe {682 let add = _mm256_adds_epi16(a, b).as_i16x16();683 transmute(simd_select_bitmask(k, add, src.as_i16x16()))684 }685}686687/// Add packed signed 16-bit integers in a and b using saturation, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).688///689/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_adds_epi16&expand=178)690#[inline]691#[target_feature(enable = "avx512bw,avx512vl")]692#[stable(feature = "stdarch_x86_avx512", since = "1.89")]693#[cfg_attr(test, assert_instr(vpaddsw))]694#[rustc_const_unstable(feature = 
"stdarch_const_x86", issue = "149298")]695pub const fn _mm256_maskz_adds_epi16(k: __mmask16, a: __m256i, b: __m256i) -> __m256i {696 unsafe {697 let add = _mm256_adds_epi16(a, b).as_i16x16();698 transmute(simd_select_bitmask(k, add, i16x16::ZERO))699 }700}701702/// Add packed signed 16-bit integers in a and b using saturation, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).703///704/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_adds_epi16&expand=174)705#[inline]706#[target_feature(enable = "avx512bw,avx512vl")]707#[stable(feature = "stdarch_x86_avx512", since = "1.89")]708#[cfg_attr(test, assert_instr(vpaddsw))]709#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]710pub const fn _mm_mask_adds_epi16(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> __m128i {711 unsafe {712 let add = _mm_adds_epi16(a, b).as_i16x8();713 transmute(simd_select_bitmask(k, add, src.as_i16x8()))714 }715}716717/// Add packed signed 16-bit integers in a and b using saturation, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).718///719/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_adds_epi16&expand=175)720#[inline]721#[target_feature(enable = "avx512bw,avx512vl")]722#[stable(feature = "stdarch_x86_avx512", since = "1.89")]723#[cfg_attr(test, assert_instr(vpaddsw))]724#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]725pub const fn _mm_maskz_adds_epi16(k: __mmask8, a: __m128i, b: __m128i) -> __m128i {726 unsafe {727 let add = _mm_adds_epi16(a, b).as_i16x8();728 transmute(simd_select_bitmask(k, add, i16x8::ZERO))729 }730}731732/// Add packed signed 8-bit integers in a and b using saturation, and store the results in dst.733///734/// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_adds_epi8&expand=188)735#[inline]736#[target_feature(enable = "avx512bw")]737#[stable(feature = "stdarch_x86_avx512", since = "1.89")]738#[cfg_attr(test, assert_instr(vpaddsb))]739#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]740pub const fn _mm512_adds_epi8(a: __m512i, b: __m512i) -> __m512i {741 unsafe { transmute(simd_saturating_add(a.as_i8x64(), b.as_i8x64())) }742}743744/// Add packed signed 8-bit integers in a and b using saturation, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).745///746/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_adds_epi8&expand=189)747#[inline]748#[target_feature(enable = "avx512bw")]749#[stable(feature = "stdarch_x86_avx512", since = "1.89")]750#[cfg_attr(test, assert_instr(vpaddsb))]751#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]752pub const fn _mm512_mask_adds_epi8(src: __m512i, k: __mmask64, a: __m512i, b: __m512i) -> __m512i {753 unsafe {754 let add = _mm512_adds_epi8(a, b).as_i8x64();755 transmute(simd_select_bitmask(k, add, src.as_i8x64()))756 }757}758759/// Add packed signed 8-bit integers in a and b using saturation, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).760///761/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_adds_epi8&expand=190)762#[inline]763#[target_feature(enable = "avx512bw")]764#[stable(feature = "stdarch_x86_avx512", since = "1.89")]765#[cfg_attr(test, assert_instr(vpaddsb))]766#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]767pub const fn _mm512_maskz_adds_epi8(k: __mmask64, a: __m512i, b: __m512i) -> __m512i {768 unsafe {769 let add = 
_mm512_adds_epi8(a, b).as_i8x64();
        transmute(simd_select_bitmask(k, add, i8x64::ZERO))
    }
}

/// Add packed signed 8-bit integers in a and b using saturation, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_adds_epi8&expand=186)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[stable(feature = "stdarch_x86_avx512", since = "1.89")]
#[cfg_attr(test, assert_instr(vpaddsb))]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm256_mask_adds_epi8(src: __m256i, k: __mmask32, a: __m256i, b: __m256i) -> __m256i {
    // Saturating add, falling back to the corresponding `src` lane when unmasked.
    unsafe { transmute(simd_select_bitmask(k, _mm256_adds_epi8(a, b).as_i8x32(), src.as_i8x32())) }
}

/// Add packed signed 8-bit integers in a and b using saturation, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_adds_epi8&expand=187)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[stable(feature = "stdarch_x86_avx512", since = "1.89")]
#[cfg_attr(test, assert_instr(vpaddsb))]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm256_maskz_adds_epi8(k: __mmask32, a: __m256i, b: __m256i) -> __m256i {
    // Saturating add, then zero out every lane whose mask bit is clear.
    unsafe { transmute(simd_select_bitmask(k, _mm256_adds_epi8(a, b).as_i8x32(), i8x32::ZERO)) }
}

/// Add packed signed 8-bit integers in a and b using saturation, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_adds_epi8&expand=183)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[stable(feature = "stdarch_x86_avx512", since = "1.89")]
#[cfg_attr(test, assert_instr(vpaddsb))]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm_mask_adds_epi8(src: __m128i, k: __mmask16, a: __m128i, b: __m128i) -> __m128i {
    // Saturating add, falling back to the corresponding `src` lane when unmasked.
    unsafe { transmute(simd_select_bitmask(k, _mm_adds_epi8(a, b).as_i8x16(), src.as_i8x16())) }
}

/// Add packed signed 8-bit integers in a and b using saturation, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_adds_epi8&expand=184)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[stable(feature = "stdarch_x86_avx512", since = "1.89")]
#[cfg_attr(test, assert_instr(vpaddsb))]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm_maskz_adds_epi8(k: __mmask16, a: __m128i, b: __m128i) -> __m128i {
    // Saturating add, then zero out every lane whose mask bit is clear.
    unsafe { transmute(simd_select_bitmask(k, _mm_adds_epi8(a, b).as_i8x16(), i8x16::ZERO)) }
}

/// Subtract packed 16-bit integers in b from packed 16-bit integers in a, and store the results in dst.
///
/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_sub_epi16&expand=5685)
#[inline]
#[target_feature(enable = "avx512bw")]
#[stable(feature = "stdarch_x86_avx512", since = "1.89")]
#[cfg_attr(test, assert_instr(vpsubw))]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm512_sub_epi16(a: __m512i, b: __m512i) -> __m512i {
    unsafe {
        // Lane-wise wrapping subtraction over the 32 i16 lanes.
        let diff = simd_sub(a.as_i16x32(), b.as_i16x32());
        transmute(diff)
    }
}

/// Subtract packed 16-bit integers in b from packed 16-bit integers in a, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_sub_epi16&expand=5683)
#[inline]
#[target_feature(enable = "avx512bw")]
#[stable(feature = "stdarch_x86_avx512", since = "1.89")]
#[cfg_attr(test, assert_instr(vpsubw))]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm512_mask_sub_epi16(src: __m512i, k: __mmask32, a: __m512i, b: __m512i) -> __m512i {
    // Subtract, falling back to the corresponding `src` lane when unmasked.
    unsafe { transmute(simd_select_bitmask(k, _mm512_sub_epi16(a, b).as_i16x32(), src.as_i16x32())) }
}

/// Subtract packed 16-bit integers in b from packed 16-bit integers in a, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_sub_epi16&expand=5684)
#[inline]
#[target_feature(enable = "avx512bw")]
#[stable(feature = "stdarch_x86_avx512", since = "1.89")]
#[cfg_attr(test, assert_instr(vpsubw))]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm512_maskz_sub_epi16(k: __mmask32, a: __m512i, b: __m512i) -> __m512i {
    // Subtract, then zero out every lane whose mask bit is clear.
    unsafe { transmute(simd_select_bitmask(k, _mm512_sub_epi16(a, b).as_i16x32(), i16x32::ZERO)) }
}

/// Subtract packed 16-bit integers in b from packed 16-bit integers in a, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_sub_epi16&expand=5680)
#[inline]
#[target_feature(enable =
"avx512bw,avx512vl")]881#[stable(feature = "stdarch_x86_avx512", since = "1.89")]882#[cfg_attr(test, assert_instr(vpsubw))]883#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]884pub const fn _mm256_mask_sub_epi16(src: __m256i, k: __mmask16, a: __m256i, b: __m256i) -> __m256i {885 unsafe {886 let sub = _mm256_sub_epi16(a, b).as_i16x16();887 transmute(simd_select_bitmask(k, sub, src.as_i16x16()))888 }889}890891/// Subtract packed 16-bit integers in b from packed 16-bit integers in a, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).892///893/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_sub_epi16&expand=5681)894#[inline]895#[target_feature(enable = "avx512bw,avx512vl")]896#[stable(feature = "stdarch_x86_avx512", since = "1.89")]897#[cfg_attr(test, assert_instr(vpsubw))]898#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]899pub const fn _mm256_maskz_sub_epi16(k: __mmask16, a: __m256i, b: __m256i) -> __m256i {900 unsafe {901 let sub = _mm256_sub_epi16(a, b).as_i16x16();902 transmute(simd_select_bitmask(k, sub, i16x16::ZERO))903 }904}905906/// Subtract packed 16-bit integers in b from packed 16-bit integers in a, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).907///908/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_sub_epi16&expand=5677)909#[inline]910#[target_feature(enable = "avx512bw,avx512vl")]911#[stable(feature = "stdarch_x86_avx512", since = "1.89")]912#[cfg_attr(test, assert_instr(vpsubw))]913#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]914pub const fn _mm_mask_sub_epi16(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> __m128i {915 unsafe {916 let sub = _mm_sub_epi16(a, b).as_i16x8();917 
transmute(simd_select_bitmask(k, sub, src.as_i16x8()))
    }
}

/// Subtract packed 16-bit integers in b from packed 16-bit integers in a, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_sub_epi16&expand=5678)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[stable(feature = "stdarch_x86_avx512", since = "1.89")]
#[cfg_attr(test, assert_instr(vpsubw))]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm_maskz_sub_epi16(k: __mmask8, a: __m128i, b: __m128i) -> __m128i {
    // Subtract, then zero out every lane whose mask bit is clear.
    unsafe { transmute(simd_select_bitmask(k, _mm_sub_epi16(a, b).as_i16x8(), i16x8::ZERO)) }
}

/// Subtract packed 8-bit integers in b from packed 8-bit integers in a, and store the results in dst.
///
/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_sub_epi8&expand=5712)
#[inline]
#[target_feature(enable = "avx512bw")]
#[stable(feature = "stdarch_x86_avx512", since = "1.89")]
#[cfg_attr(test, assert_instr(vpsubb))]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm512_sub_epi8(a: __m512i, b: __m512i) -> __m512i {
    unsafe {
        // Lane-wise wrapping subtraction over the 64 i8 lanes.
        let diff = simd_sub(a.as_i8x64(), b.as_i8x64());
        transmute(diff)
    }
}

/// Subtract packed 8-bit integers in b from packed 8-bit integers in a, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_sub_epi8&expand=5710)
#[inline]
#[target_feature(enable = "avx512bw")]
#[stable(feature = "stdarch_x86_avx512", since = "1.89")]
#[cfg_attr(test,
assert_instr(vpsubb))]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm512_mask_sub_epi8(src: __m512i, k: __mmask64, a: __m512i, b: __m512i) -> __m512i {
    // Subtract, falling back to the corresponding `src` lane when unmasked.
    unsafe { transmute(simd_select_bitmask(k, _mm512_sub_epi8(a, b).as_i8x64(), src.as_i8x64())) }
}

/// Subtract packed 8-bit integers in b from packed 8-bit integers in a, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_sub_epi8&expand=5711)
#[inline]
#[target_feature(enable = "avx512bw")]
#[stable(feature = "stdarch_x86_avx512", since = "1.89")]
#[cfg_attr(test, assert_instr(vpsubb))]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm512_maskz_sub_epi8(k: __mmask64, a: __m512i, b: __m512i) -> __m512i {
    // Subtract, then zero out every lane whose mask bit is clear.
    unsafe { transmute(simd_select_bitmask(k, _mm512_sub_epi8(a, b).as_i8x64(), i8x64::ZERO)) }
}

/// Subtract packed 8-bit integers in b from packed 8-bit integers in a, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_sub_epi8&expand=5707)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[stable(feature = "stdarch_x86_avx512", since = "1.89")]
#[cfg_attr(test, assert_instr(vpsubb))]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm256_mask_sub_epi8(src: __m256i, k: __mmask32, a: __m256i, b: __m256i) -> __m256i {
    // Subtract, falling back to the corresponding `src` lane when unmasked.
    unsafe { transmute(simd_select_bitmask(k, _mm256_sub_epi8(a, b).as_i8x32(), src.as_i8x32())) }
}

/// Subtract packed 8-bit integers in b from packed 8-bit integers in a, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_sub_epi8&expand=5708)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[stable(feature = "stdarch_x86_avx512", since = "1.89")]
#[cfg_attr(test, assert_instr(vpsubb))]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm256_maskz_sub_epi8(k: __mmask32, a: __m256i, b: __m256i) -> __m256i {
    // Subtract, then zero out every lane whose mask bit is clear.
    unsafe { transmute(simd_select_bitmask(k, _mm256_sub_epi8(a, b).as_i8x32(), i8x32::ZERO)) }
}

/// Subtract packed 8-bit integers in b from packed 8-bit integers in a, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_sub_epi8&expand=5704)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[stable(feature = "stdarch_x86_avx512", since = "1.89")]
#[cfg_attr(test, assert_instr(vpsubb))]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm_mask_sub_epi8(src: __m128i, k: __mmask16, a: __m128i, b: __m128i) -> __m128i {
    // Subtract, falling back to the corresponding `src` lane when unmasked.
    unsafe { transmute(simd_select_bitmask(k, _mm_sub_epi8(a, b).as_i8x16(), src.as_i8x16())) }
}

/// Subtract packed 8-bit integers in b from packed 8-bit integers in a, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_sub_epi8&expand=5705)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[stable(feature = "stdarch_x86_avx512",
since = "1.89")]1029#[cfg_attr(test, assert_instr(vpsubb))]1030#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]1031pub const fn _mm_maskz_sub_epi8(k: __mmask16, a: __m128i, b: __m128i) -> __m128i {1032 unsafe {1033 let sub = _mm_sub_epi8(a, b).as_i8x16();1034 transmute(simd_select_bitmask(k, sub, i8x16::ZERO))1035 }1036}10371038/// Subtract packed unsigned 16-bit integers in b from packed unsigned 16-bit integers in a using saturation, and store the results in dst.1039///1040/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_subs_epu16&expand=5793)1041#[inline]1042#[target_feature(enable = "avx512bw")]1043#[stable(feature = "stdarch_x86_avx512", since = "1.89")]1044#[cfg_attr(test, assert_instr(vpsubusw))]1045#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]1046pub const fn _mm512_subs_epu16(a: __m512i, b: __m512i) -> __m512i {1047 unsafe { transmute(simd_saturating_sub(a.as_u16x32(), b.as_u16x32())) }1048}10491050/// Subtract packed unsigned 16-bit integers in b from packed unsigned 16-bit integers in a using saturation, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).1051///1052/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_subs_epu16&expand=5791)1053#[inline]1054#[target_feature(enable = "avx512bw")]1055#[stable(feature = "stdarch_x86_avx512", since = "1.89")]1056#[cfg_attr(test, assert_instr(vpsubusw))]1057#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]1058pub const fn _mm512_mask_subs_epu16(src: __m512i, k: __mmask32, a: __m512i, b: __m512i) -> __m512i {1059 unsafe {1060 let sub = _mm512_subs_epu16(a, b).as_u16x32();1061 transmute(simd_select_bitmask(k, sub, src.as_u16x32()))1062 }1063}10641065/// Subtract packed unsigned 16-bit integers in b from packed unsigned 16-bit integers in a 
using saturation, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).1066///1067/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_subs_epu16&expand=5792)1068#[inline]1069#[target_feature(enable = "avx512bw")]1070#[stable(feature = "stdarch_x86_avx512", since = "1.89")]1071#[cfg_attr(test, assert_instr(vpsubusw))]1072#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]1073pub const fn _mm512_maskz_subs_epu16(k: __mmask32, a: __m512i, b: __m512i) -> __m512i {1074 unsafe {1075 let sub = _mm512_subs_epu16(a, b).as_u16x32();1076 transmute(simd_select_bitmask(k, sub, u16x32::ZERO))1077 }1078}10791080/// Subtract packed unsigned 16-bit integers in b from packed unsigned 16-bit integers in a using saturation, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).1081///1082/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_subs_epu16&expand=5788)1083#[inline]1084#[target_feature(enable = "avx512bw,avx512vl")]1085#[stable(feature = "stdarch_x86_avx512", since = "1.89")]1086#[cfg_attr(test, assert_instr(vpsubusw))]1087#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]1088pub const fn _mm256_mask_subs_epu16(src: __m256i, k: __mmask16, a: __m256i, b: __m256i) -> __m256i {1089 unsafe {1090 let sub = _mm256_subs_epu16(a, b).as_u16x16();1091 transmute(simd_select_bitmask(k, sub, src.as_u16x16()))1092 }1093}10941095/// Subtract packed unsigned 16-bit integers in b from packed unsigned 16-bit integers in a using saturation, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).1096///1097/// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_subs_epu16&expand=5789)1098#[inline]1099#[target_feature(enable = "avx512bw,avx512vl")]1100#[stable(feature = "stdarch_x86_avx512", since = "1.89")]1101#[cfg_attr(test, assert_instr(vpsubusw))]1102#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]1103pub const fn _mm256_maskz_subs_epu16(k: __mmask16, a: __m256i, b: __m256i) -> __m256i {1104 unsafe {1105 let sub = _mm256_subs_epu16(a, b).as_u16x16();1106 transmute(simd_select_bitmask(k, sub, u16x16::ZERO))1107 }1108}11091110/// Subtract packed unsigned 16-bit integers in b from packed unsigned 16-bit integers in a using saturation, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).1111///1112/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_subs_epu16&expand=5785)1113#[inline]1114#[target_feature(enable = "avx512bw,avx512vl")]1115#[stable(feature = "stdarch_x86_avx512", since = "1.89")]1116#[cfg_attr(test, assert_instr(vpsubusw))]1117#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]1118pub const fn _mm_mask_subs_epu16(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> __m128i {1119 unsafe {1120 let sub = _mm_subs_epu16(a, b).as_u16x8();1121 transmute(simd_select_bitmask(k, sub, src.as_u16x8()))1122 }1123}11241125/// Subtract packed unsigned 16-bit integers in b from packed unsigned 16-bit integers in a using saturation, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).1126///1127/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_subs_epu16&expand=5786)1128#[inline]1129#[target_feature(enable = "avx512bw,avx512vl")]1130#[stable(feature = "stdarch_x86_avx512", since = "1.89")]1131#[cfg_attr(test, 
assert_instr(vpsubusw))]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm_maskz_subs_epu16(k: __mmask8, a: __m128i, b: __m128i) -> __m128i {
    // Saturating subtract, then zero out every lane whose mask bit is clear.
    unsafe { transmute(simd_select_bitmask(k, _mm_subs_epu16(a, b).as_u16x8(), u16x8::ZERO)) }
}

/// Subtract packed unsigned 8-bit integers in b from packed unsigned 8-bit integers in a using saturation, and store the results in dst.
///
/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_subs_epu8&expand=5802)
#[inline]
#[target_feature(enable = "avx512bw")]
#[stable(feature = "stdarch_x86_avx512", since = "1.89")]
#[cfg_attr(test, assert_instr(vpsubusb))]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm512_subs_epu8(a: __m512i, b: __m512i) -> __m512i {
    unsafe {
        // Lane-wise saturating unsigned subtraction over the 64 u8 lanes.
        let diff = simd_saturating_sub(a.as_u8x64(), b.as_u8x64());
        transmute(diff)
    }
}

/// Subtract packed unsigned 8-bit integers in b from packed unsigned 8-bit integers in a using saturation, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_subs_epu8&expand=5800)
#[inline]
#[target_feature(enable = "avx512bw")]
#[stable(feature = "stdarch_x86_avx512", since = "1.89")]
#[cfg_attr(test, assert_instr(vpsubusb))]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm512_mask_subs_epu8(src: __m512i, k: __mmask64, a: __m512i, b: __m512i) -> __m512i {
    // Saturating subtract, falling back to the corresponding `src` lane when unmasked.
    unsafe { transmute(simd_select_bitmask(k, _mm512_subs_epu8(a, b).as_u8x64(), src.as_u8x64())) }
}

/// Subtract packed unsigned 8-bit integers in b from packed unsigned 8-bit integers in a using saturation, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_subs_epu8&expand=5801)
#[inline]
#[target_feature(enable = "avx512bw")]
#[stable(feature = "stdarch_x86_avx512", since = "1.89")]
#[cfg_attr(test, assert_instr(vpsubusb))]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm512_maskz_subs_epu8(k: __mmask64, a: __m512i, b: __m512i) -> __m512i {
    // Saturating subtract, then zero out every lane whose mask bit is clear.
    unsafe { transmute(simd_select_bitmask(k, _mm512_subs_epu8(a, b).as_u8x64(), u8x64::ZERO)) }
}

/// Subtract packed unsigned 8-bit integers in b from packed unsigned 8-bit integers in a using saturation, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_subs_epu8&expand=5797)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[stable(feature = "stdarch_x86_avx512", since = "1.89")]
#[cfg_attr(test, assert_instr(vpsubusb))]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm256_mask_subs_epu8(src: __m256i, k: __mmask32, a: __m256i, b: __m256i) -> __m256i {
    // Saturating subtract, falling back to the corresponding `src` lane when unmasked.
    unsafe { transmute(simd_select_bitmask(k, _mm256_subs_epu8(a, b).as_u8x32(), src.as_u8x32())) }
}

/// Subtract packed unsigned 8-bit integers in b from packed unsigned 8-bit integers in a using saturation, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_subs_epu8&expand=5798)
#[inline]
#[target_feature(enable =
"avx512bw,avx512vl")]1202#[stable(feature = "stdarch_x86_avx512", since = "1.89")]1203#[cfg_attr(test, assert_instr(vpsubusb))]1204#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]1205pub const fn _mm256_maskz_subs_epu8(k: __mmask32, a: __m256i, b: __m256i) -> __m256i {1206 unsafe {1207 let sub = _mm256_subs_epu8(a, b).as_u8x32();1208 transmute(simd_select_bitmask(k, sub, u8x32::ZERO))1209 }1210}12111212/// Subtract packed unsigned 8-bit integers in b from packed unsigned 8-bit integers in a using saturation, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).1213///1214/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_subs_epu8&expand=5794)1215#[inline]1216#[target_feature(enable = "avx512bw,avx512vl")]1217#[stable(feature = "stdarch_x86_avx512", since = "1.89")]1218#[cfg_attr(test, assert_instr(vpsubusb))]1219#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]1220pub const fn _mm_mask_subs_epu8(src: __m128i, k: __mmask16, a: __m128i, b: __m128i) -> __m128i {1221 unsafe {1222 let sub = _mm_subs_epu8(a, b).as_u8x16();1223 transmute(simd_select_bitmask(k, sub, src.as_u8x16()))1224 }1225}12261227/// Subtract packed unsigned 8-bit integers in b from packed unsigned 8-bit integers in a using saturation, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).1228///1229/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_subs_epu8&expand=5795)1230#[inline]1231#[target_feature(enable = "avx512bw,avx512vl")]1232#[stable(feature = "stdarch_x86_avx512", since = "1.89")]1233#[cfg_attr(test, assert_instr(vpsubusb))]1234#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]1235pub const fn _mm_maskz_subs_epu8(k: __mmask16, a: __m128i, b: __m128i) -> __m128i {1236 
unsafe {1237 let sub = _mm_subs_epu8(a, b).as_u8x16();1238 transmute(simd_select_bitmask(k, sub, u8x16::ZERO))1239 }1240}12411242/// Subtract packed signed 16-bit integers in b from packed 16-bit integers in a using saturation, and store the results in dst.1243///1244/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_subs_epi16&expand=5775)1245#[inline]1246#[target_feature(enable = "avx512bw")]1247#[stable(feature = "stdarch_x86_avx512", since = "1.89")]1248#[cfg_attr(test, assert_instr(vpsubsw))]1249#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]1250pub const fn _mm512_subs_epi16(a: __m512i, b: __m512i) -> __m512i {1251 unsafe { transmute(simd_saturating_sub(a.as_i16x32(), b.as_i16x32())) }1252}12531254/// Subtract packed signed 16-bit integers in b from packed 16-bit integers in a using saturation, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).1255///1256/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_subs_epi16&expand=5773)1257#[inline]1258#[target_feature(enable = "avx512bw")]1259#[stable(feature = "stdarch_x86_avx512", since = "1.89")]1260#[cfg_attr(test, assert_instr(vpsubsw))]1261#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]1262pub const fn _mm512_mask_subs_epi16(src: __m512i, k: __mmask32, a: __m512i, b: __m512i) -> __m512i {1263 unsafe {1264 let sub = _mm512_subs_epi16(a, b).as_i16x32();1265 transmute(simd_select_bitmask(k, sub, src.as_i16x32()))1266 }1267}12681269/// Subtract packed signed 16-bit integers in b from packed 16-bit integers in a using saturation, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).1270///1271/// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_subs_epi16&expand=5774)1272#[inline]1273#[target_feature(enable = "avx512bw")]1274#[stable(feature = "stdarch_x86_avx512", since = "1.89")]1275#[cfg_attr(test, assert_instr(vpsubsw))]1276#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]1277pub const fn _mm512_maskz_subs_epi16(k: __mmask32, a: __m512i, b: __m512i) -> __m512i {1278 unsafe {1279 let sub = _mm512_subs_epi16(a, b).as_i16x32();1280 transmute(simd_select_bitmask(k, sub, i16x32::ZERO))1281 }1282}12831284/// Subtract packed signed 16-bit integers in b from packed 16-bit integers in a using saturation, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).1285///1286/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_subs_epi16&expand=5770)1287#[inline]1288#[target_feature(enable = "avx512bw,avx512vl")]1289#[stable(feature = "stdarch_x86_avx512", since = "1.89")]1290#[cfg_attr(test, assert_instr(vpsubsw))]1291#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]1292pub const fn _mm256_mask_subs_epi16(src: __m256i, k: __mmask16, a: __m256i, b: __m256i) -> __m256i {1293 unsafe {1294 let sub = _mm256_subs_epi16(a, b).as_i16x16();1295 transmute(simd_select_bitmask(k, sub, src.as_i16x16()))1296 }1297}12981299/// Subtract packed signed 16-bit integers in b from packed 16-bit integers in a using saturation, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).1300///1301/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_subs_epi16&expand=5771)1302#[inline]1303#[target_feature(enable = "avx512bw,avx512vl")]1304#[stable(feature = "stdarch_x86_avx512", since = "1.89")]1305#[cfg_attr(test, 
assert_instr(vpsubsw))]1306#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]1307pub const fn _mm256_maskz_subs_epi16(k: __mmask16, a: __m256i, b: __m256i) -> __m256i {1308 unsafe {1309 let sub = _mm256_subs_epi16(a, b).as_i16x16();1310 transmute(simd_select_bitmask(k, sub, i16x16::ZERO))1311 }1312}13131314/// Subtract packed signed 16-bit integers in b from packed 16-bit integers in a using saturation, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).1315///1316/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_subs_epi16&expand=5767)1317#[inline]1318#[target_feature(enable = "avx512bw,avx512vl")]1319#[stable(feature = "stdarch_x86_avx512", since = "1.89")]1320#[cfg_attr(test, assert_instr(vpsubsw))]1321#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]1322pub const fn _mm_mask_subs_epi16(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> __m128i {1323 unsafe {1324 let sub = _mm_subs_epi16(a, b).as_i16x8();1325 transmute(simd_select_bitmask(k, sub, src.as_i16x8()))1326 }1327}13281329/// Subtract packed signed 16-bit integers in b from packed 16-bit integers in a using saturation, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).1330///1331/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_subs_epi16&expand=5768)1332#[inline]1333#[target_feature(enable = "avx512bw,avx512vl")]1334#[stable(feature = "stdarch_x86_avx512", since = "1.89")]1335#[cfg_attr(test, assert_instr(vpsubsw))]1336#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]1337pub const fn _mm_maskz_subs_epi16(k: __mmask8, a: __m128i, b: __m128i) -> __m128i {1338 unsafe {1339 let sub = _mm_subs_epi16(a, b).as_i16x8();1340 transmute(simd_select_bitmask(k, sub, i16x8::ZERO))1341 
}1342}13431344/// Subtract packed signed 8-bit integers in b from packed 8-bit integers in a using saturation, and store the results in dst.1345///1346/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_subs_epi8&expand=5784)1347#[inline]1348#[target_feature(enable = "avx512bw")]1349#[stable(feature = "stdarch_x86_avx512", since = "1.89")]1350#[cfg_attr(test, assert_instr(vpsubsb))]1351#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]1352pub const fn _mm512_subs_epi8(a: __m512i, b: __m512i) -> __m512i {1353 unsafe { transmute(simd_saturating_sub(a.as_i8x64(), b.as_i8x64())) }1354}13551356/// Subtract packed signed 8-bit integers in b from packed 8-bit integers in a using saturation, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).1357///1358/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_subs_epi8&expand=5782)1359#[inline]1360#[target_feature(enable = "avx512bw")]1361#[stable(feature = "stdarch_x86_avx512", since = "1.89")]1362#[cfg_attr(test, assert_instr(vpsubsb))]1363#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]1364pub const fn _mm512_mask_subs_epi8(src: __m512i, k: __mmask64, a: __m512i, b: __m512i) -> __m512i {1365 unsafe {1366 let sub = _mm512_subs_epi8(a, b).as_i8x64();1367 transmute(simd_select_bitmask(k, sub, src.as_i8x64()))1368 }1369}13701371/// Subtract packed signed 8-bit integers in b from packed 8-bit integers in a using saturation, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).1372///1373/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_subs_epi8&expand=5783)1374#[inline]1375#[target_feature(enable = "avx512bw")]1376#[stable(feature = "stdarch_x86_avx512", since = 
"1.89")]1377#[cfg_attr(test, assert_instr(vpsubsb))]1378#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]1379pub const fn _mm512_maskz_subs_epi8(k: __mmask64, a: __m512i, b: __m512i) -> __m512i {1380 unsafe {1381 let sub = _mm512_subs_epi8(a, b).as_i8x64();1382 transmute(simd_select_bitmask(k, sub, i8x64::ZERO))1383 }1384}13851386/// Subtract packed signed 8-bit integers in b from packed 8-bit integers in a using saturation, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).1387///1388/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_subs_epi8&expand=5779)1389#[inline]1390#[target_feature(enable = "avx512bw,avx512vl")]1391#[stable(feature = "stdarch_x86_avx512", since = "1.89")]1392#[cfg_attr(test, assert_instr(vpsubsb))]1393#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]1394pub const fn _mm256_mask_subs_epi8(src: __m256i, k: __mmask32, a: __m256i, b: __m256i) -> __m256i {1395 unsafe {1396 let sub = _mm256_subs_epi8(a, b).as_i8x32();1397 transmute(simd_select_bitmask(k, sub, src.as_i8x32()))1398 }1399}14001401/// Subtract packed signed 8-bit integers in b from packed 8-bit integers in a using saturation, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).1402///1403/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_subs_epi8&expand=5780)1404#[inline]1405#[target_feature(enable = "avx512bw,avx512vl")]1406#[stable(feature = "stdarch_x86_avx512", since = "1.89")]1407#[cfg_attr(test, assert_instr(vpsubsb))]1408#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]1409pub const fn _mm256_maskz_subs_epi8(k: __mmask32, a: __m256i, b: __m256i) -> __m256i {1410 unsafe {1411 let sub = _mm256_subs_epi8(a, b).as_i8x32();1412 
transmute(simd_select_bitmask(k, sub, i8x32::ZERO))1413 }1414}14151416/// Subtract packed signed 8-bit integers in b from packed 8-bit integers in a using saturation, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).1417///1418/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_subs_epi8&expand=5776)1419#[inline]1420#[target_feature(enable = "avx512bw,avx512vl")]1421#[stable(feature = "stdarch_x86_avx512", since = "1.89")]1422#[cfg_attr(test, assert_instr(vpsubsb))]1423#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]1424pub const fn _mm_mask_subs_epi8(src: __m128i, k: __mmask16, a: __m128i, b: __m128i) -> __m128i {1425 unsafe {1426 let sub = _mm_subs_epi8(a, b).as_i8x16();1427 transmute(simd_select_bitmask(k, sub, src.as_i8x16()))1428 }1429}14301431/// Subtract packed signed 8-bit integers in b from packed 8-bit integers in a using saturation, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).1432///1433/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_subs_epi8&expand=5777)1434#[inline]1435#[target_feature(enable = "avx512bw,avx512vl")]1436#[stable(feature = "stdarch_x86_avx512", since = "1.89")]1437#[cfg_attr(test, assert_instr(vpsubsb))]1438#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]1439pub const fn _mm_maskz_subs_epi8(k: __mmask16, a: __m128i, b: __m128i) -> __m128i {1440 unsafe {1441 let sub = _mm_subs_epi8(a, b).as_i8x16();1442 transmute(simd_select_bitmask(k, sub, i8x16::ZERO))1443 }1444}14451446/// Multiply the packed unsigned 16-bit integers in a and b, producing intermediate 32-bit integers, and store the high 16 bits of the intermediate integers in dst.1447///1448/// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mulhi_epu16&expand=3973)
#[inline]
#[target_feature(enable = "avx512bw")]
#[stable(feature = "stdarch_x86_avx512", since = "1.89")]
#[cfg_attr(test, assert_instr(vpmulhuw))]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm512_mulhi_epu16(a: __m512i, b: __m512i) -> __m512i {
    unsafe {
        // Widen each lane to 32 bits, multiply, then keep the upper half of every product.
        let wide_a = simd_cast::<_, u32x32>(a.as_u16x32());
        let wide_b = simd_cast::<_, u32x32>(b.as_u16x32());
        let hi = simd_shr(simd_mul(wide_a, wide_b), u32x32::splat(16));
        transmute(simd_cast::<u32x32, u16x32>(hi))
    }
}

/// Multiply the packed unsigned 16-bit integers in a and b, producing intermediate 32-bit integers, and store the high 16 bits of the intermediate integers in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_mulhi_epu16&expand=3971)
#[inline]
#[target_feature(enable = "avx512bw")]
#[stable(feature = "stdarch_x86_avx512", since = "1.89")]
#[cfg_attr(test, assert_instr(vpmulhuw))]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm512_mask_mulhi_epu16(
    src: __m512i,
    k: __mmask32,
    a: __m512i,
    b: __m512i,
) -> __m512i {
    unsafe {
        let hi = _mm512_mulhi_epu16(a, b).as_u16x32();
        transmute(simd_select_bitmask(k, hi, src.as_u16x32()))
    }
}

/// Multiply the packed unsigned 16-bit integers in a and b, producing intermediate 32-bit integers, and store the high 16 bits of the intermediate integers in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_mulhi_epu16&expand=3972)
#[inline]
#[target_feature(enable = "avx512bw")]
#[stable(feature = "stdarch_x86_avx512", since = "1.89")]
#[cfg_attr(test, assert_instr(vpmulhuw))]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm512_maskz_mulhi_epu16(k: __mmask32, a: __m512i, b: __m512i) -> __m512i {
    unsafe {
        let hi = _mm512_mulhi_epu16(a, b).as_u16x32();
        transmute(simd_select_bitmask(k, hi, u16x32::ZERO))
    }
}

/// Multiply the packed unsigned 16-bit integers in a and b, producing intermediate 32-bit integers, and store the high 16 bits of the intermediate integers in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_mulhi_epu16&expand=3968)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[stable(feature = "stdarch_x86_avx512", since = "1.89")]
#[cfg_attr(test, assert_instr(vpmulhuw))]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm256_mask_mulhi_epu16(
    src: __m256i,
    k: __mmask16,
    a: __m256i,
    b: __m256i,
) -> __m256i {
    unsafe {
        let hi = _mm256_mulhi_epu16(a, b).as_u16x16();
        transmute(simd_select_bitmask(k, hi, src.as_u16x16()))
    }
}

/// Multiply the packed unsigned 16-bit integers in a and b, producing intermediate 32-bit integers, and store the high 16 bits of the intermediate integers in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_mulhi_epu16&expand=3969)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[stable(feature = "stdarch_x86_avx512", since = "1.89")]
#[cfg_attr(test, assert_instr(vpmulhuw))]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm256_maskz_mulhi_epu16(k: __mmask16, a: __m256i, b: __m256i) -> __m256i {
    unsafe {
        let hi = _mm256_mulhi_epu16(a, b).as_u16x16();
        transmute(simd_select_bitmask(k, hi, u16x16::ZERO))
    }
}

/// Multiply the packed unsigned 16-bit integers in a and b, producing intermediate 32-bit integers, and store the high 16 bits of the intermediate integers in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_mulhi_epu16&expand=3965)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[stable(feature = "stdarch_x86_avx512", since = "1.89")]
#[cfg_attr(test, assert_instr(vpmulhuw))]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm_mask_mulhi_epu16(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> __m128i {
    unsafe {
        let hi = _mm_mulhi_epu16(a, b).as_u16x8();
        transmute(simd_select_bitmask(k, hi, src.as_u16x8()))
    }
}

/// Multiply the packed unsigned 16-bit integers in a and b, producing intermediate 32-bit integers, and store the high 16 bits of the intermediate integers in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_mulhi_epu16&expand=3966)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[stable(feature = "stdarch_x86_avx512", since = "1.89")]
#[cfg_attr(test, assert_instr(vpmulhuw))]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn 
_mm_maskz_mulhi_epu16(k: __mmask8, a: __m128i, b: __m128i) -> __m128i {1557 unsafe {1558 let mul = _mm_mulhi_epu16(a, b).as_u16x8();1559 transmute(simd_select_bitmask(k, mul, u16x8::ZERO))1560 }1561}15621563/// Multiply the packed signed 16-bit integers in a and b, producing intermediate 32-bit integers, and store the high 16 bits of the intermediate integers in dst.1564///1565/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mulhi_epi16&expand=3962)1566#[inline]1567#[target_feature(enable = "avx512bw")]1568#[stable(feature = "stdarch_x86_avx512", since = "1.89")]1569#[cfg_attr(test, assert_instr(vpmulhw))]1570#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]1571pub const fn _mm512_mulhi_epi16(a: __m512i, b: __m512i) -> __m512i {1572 unsafe {1573 let a = simd_cast::<_, i32x32>(a.as_i16x32());1574 let b = simd_cast::<_, i32x32>(b.as_i16x32());1575 let r = simd_shr(simd_mul(a, b), i32x32::splat(16));1576 transmute(simd_cast::<i32x32, i16x32>(r))1577 }1578}15791580/// Multiply the packed signed 16-bit integers in a and b, producing intermediate 32-bit integers, and store the high 16 bits of the intermediate integers in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).1581///1582/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_mulhi_epi16&expand=3960)1583#[inline]1584#[target_feature(enable = "avx512bw")]1585#[stable(feature = "stdarch_x86_avx512", since = "1.89")]1586#[cfg_attr(test, assert_instr(vpmulhw))]1587#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]1588pub const fn _mm512_mask_mulhi_epi16(1589 src: __m512i,1590 k: __mmask32,1591 a: __m512i,1592 b: __m512i,1593) -> __m512i {1594 unsafe {1595 let mul = _mm512_mulhi_epi16(a, b).as_i16x32();1596 transmute(simd_select_bitmask(k, mul, src.as_i16x32()))1597 }1598}15991600/// Multiply the packed 
signed 16-bit integers in a and b, producing intermediate 32-bit integers, and store the high 16 bits of the intermediate integers in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).1601///1602/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_mulhi_epi16&expand=3961)1603#[inline]1604#[target_feature(enable = "avx512bw")]1605#[stable(feature = "stdarch_x86_avx512", since = "1.89")]1606#[cfg_attr(test, assert_instr(vpmulhw))]1607#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]1608pub const fn _mm512_maskz_mulhi_epi16(k: __mmask32, a: __m512i, b: __m512i) -> __m512i {1609 unsafe {1610 let mul = _mm512_mulhi_epi16(a, b).as_i16x32();1611 transmute(simd_select_bitmask(k, mul, i16x32::ZERO))1612 }1613}16141615/// Multiply the packed signed 16-bit integers in a and b, producing intermediate 32-bit integers, and store the high 16 bits of the intermediate integers in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).1616///1617/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_mulhi_epi16&expand=3957)1618#[inline]1619#[target_feature(enable = "avx512bw,avx512vl")]1620#[stable(feature = "stdarch_x86_avx512", since = "1.89")]1621#[cfg_attr(test, assert_instr(vpmulhw))]1622#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]1623pub const fn _mm256_mask_mulhi_epi16(1624 src: __m256i,1625 k: __mmask16,1626 a: __m256i,1627 b: __m256i,1628) -> __m256i {1629 unsafe {1630 let mul = _mm256_mulhi_epi16(a, b).as_i16x16();1631 transmute(simd_select_bitmask(k, mul, src.as_i16x16()))1632 }1633}16341635/// Multiply the packed signed 16-bit integers in a and b, producing intermediate 32-bit integers, and store the high 16 bits of the intermediate integers in dst using zeromask k (elements are zeroed out when the corresponding 
mask bit is not set).1636///1637/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_mulhi_epi16&expand=3958)1638#[inline]1639#[target_feature(enable = "avx512bw,avx512vl")]1640#[stable(feature = "stdarch_x86_avx512", since = "1.89")]1641#[cfg_attr(test, assert_instr(vpmulhw))]1642#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]1643pub const fn _mm256_maskz_mulhi_epi16(k: __mmask16, a: __m256i, b: __m256i) -> __m256i {1644 unsafe {1645 let mul = _mm256_mulhi_epi16(a, b).as_i16x16();1646 transmute(simd_select_bitmask(k, mul, i16x16::ZERO))1647 }1648}16491650/// Multiply the packed signed 16-bit integers in a and b, producing intermediate 32-bit integers, and store the high 16 bits of the intermediate integers in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).1651///1652/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_mulhi_epi16&expand=3954)1653#[inline]1654#[target_feature(enable = "avx512bw,avx512vl")]1655#[stable(feature = "stdarch_x86_avx512", since = "1.89")]1656#[cfg_attr(test, assert_instr(vpmulhw))]1657#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]1658pub const fn _mm_mask_mulhi_epi16(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> __m128i {1659 unsafe {1660 let mul = _mm_mulhi_epi16(a, b).as_i16x8();1661 transmute(simd_select_bitmask(k, mul, src.as_i16x8()))1662 }1663}16641665/// Multiply the packed signed 16-bit integers in a and b, producing intermediate 32-bit integers, and store the high 16 bits of the intermediate integers in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).1666///1667/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_mulhi_epi16&expand=3955)1668#[inline]1669#[target_feature(enable = 
"avx512bw,avx512vl")]1670#[stable(feature = "stdarch_x86_avx512", since = "1.89")]1671#[cfg_attr(test, assert_instr(vpmulhw))]1672#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]1673pub const fn _mm_maskz_mulhi_epi16(k: __mmask8, a: __m128i, b: __m128i) -> __m128i {1674 unsafe {1675 let mul = _mm_mulhi_epi16(a, b).as_i16x8();1676 transmute(simd_select_bitmask(k, mul, i16x8::ZERO))1677 }1678}16791680/// Multiply packed signed 16-bit integers in a and b, producing intermediate signed 32-bit integers. Truncate each intermediate integer to the 18 most significant bits, round by adding 1, and store bits \[16:1\] to dst.1681///1682/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mulhrs_epi16&expand=3986)1683#[inline]1684#[target_feature(enable = "avx512bw")]1685#[stable(feature = "stdarch_x86_avx512", since = "1.89")]1686#[cfg_attr(test, assert_instr(vpmulhrsw))]1687pub fn _mm512_mulhrs_epi16(a: __m512i, b: __m512i) -> __m512i {1688 unsafe { transmute(vpmulhrsw(a.as_i16x32(), b.as_i16x32())) }1689}16901691/// Multiply packed signed 16-bit integers in a and b, producing intermediate signed 32-bit integers. 
Truncate each intermediate integer to the 18 most significant bits, round by adding 1, and store bits \[16:1\] to dst using writemask k (elements are copied from src when the corresponding mask bit is not set).1692///1693/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_mulhrs_epi16&expand=3984)1694#[inline]1695#[target_feature(enable = "avx512bw")]1696#[stable(feature = "stdarch_x86_avx512", since = "1.89")]1697#[cfg_attr(test, assert_instr(vpmulhrsw))]1698pub fn _mm512_mask_mulhrs_epi16(src: __m512i, k: __mmask32, a: __m512i, b: __m512i) -> __m512i {1699 unsafe {1700 let mul = _mm512_mulhrs_epi16(a, b).as_i16x32();1701 transmute(simd_select_bitmask(k, mul, src.as_i16x32()))1702 }1703}17041705/// Multiply packed signed 16-bit integers in a and b, producing intermediate signed 32-bit integers. Truncate each intermediate integer to the 18 most significant bits, round by adding 1, and store bits \[16:1\] to dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).1706///1707/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_mulhrs_epi16&expand=3985)1708#[inline]1709#[target_feature(enable = "avx512bw")]1710#[stable(feature = "stdarch_x86_avx512", since = "1.89")]1711#[cfg_attr(test, assert_instr(vpmulhrsw))]1712pub fn _mm512_maskz_mulhrs_epi16(k: __mmask32, a: __m512i, b: __m512i) -> __m512i {1713 unsafe {1714 let mul = _mm512_mulhrs_epi16(a, b).as_i16x32();1715 transmute(simd_select_bitmask(k, mul, i16x32::ZERO))1716 }1717}17181719/// Multiply packed signed 16-bit integers in a and b, producing intermediate signed 32-bit integers. 
Truncate each intermediate integer to the 18 most significant bits, round by adding 1, and store bits \[16:1\] to dst using writemask k (elements are copied from src when the corresponding mask bit is not set).1720///1721/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_mulhrs_epi16&expand=3981)1722#[inline]1723#[target_feature(enable = "avx512bw,avx512vl")]1724#[stable(feature = "stdarch_x86_avx512", since = "1.89")]1725#[cfg_attr(test, assert_instr(vpmulhrsw))]1726pub fn _mm256_mask_mulhrs_epi16(src: __m256i, k: __mmask16, a: __m256i, b: __m256i) -> __m256i {1727 unsafe {1728 let mul = _mm256_mulhrs_epi16(a, b).as_i16x16();1729 transmute(simd_select_bitmask(k, mul, src.as_i16x16()))1730 }1731}17321733/// Multiply packed signed 16-bit integers in a and b, producing intermediate signed 32-bit integers. Truncate each intermediate integer to the 18 most significant bits, round by adding 1, and store bits \[16:1\] to dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).1734///1735/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_mulhrs_epi16&expand=3982)1736#[inline]1737#[target_feature(enable = "avx512bw,avx512vl")]1738#[stable(feature = "stdarch_x86_avx512", since = "1.89")]1739#[cfg_attr(test, assert_instr(vpmulhrsw))]1740pub fn _mm256_maskz_mulhrs_epi16(k: __mmask16, a: __m256i, b: __m256i) -> __m256i {1741 unsafe {1742 let mul = _mm256_mulhrs_epi16(a, b).as_i16x16();1743 transmute(simd_select_bitmask(k, mul, i16x16::ZERO))1744 }1745}17461747/// Multiply packed signed 16-bit integers in a and b, producing intermediate signed 32-bit integers. 
Truncate each intermediate integer to the 18 most significant bits, round by adding 1, and store bits \[16:1\] to dst using writemask k (elements are copied from src when the corresponding mask bit is not set).1748///1749/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_mulhrs_epi16&expand=3978)1750#[inline]1751#[target_feature(enable = "avx512bw,avx512vl")]1752#[stable(feature = "stdarch_x86_avx512", since = "1.89")]1753#[cfg_attr(test, assert_instr(vpmulhrsw))]1754pub fn _mm_mask_mulhrs_epi16(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> __m128i {1755 unsafe {1756 let mul = _mm_mulhrs_epi16(a, b).as_i16x8();1757 transmute(simd_select_bitmask(k, mul, src.as_i16x8()))1758 }1759}17601761/// Multiply packed signed 16-bit integers in a and b, producing intermediate signed 32-bit integers. Truncate each intermediate integer to the 18 most significant bits, round by adding 1, and store bits \[16:1\] to dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).1762///1763/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_mulhrs_epi16&expand=3979)1764#[inline]1765#[target_feature(enable = "avx512bw,avx512vl")]1766#[stable(feature = "stdarch_x86_avx512", since = "1.89")]1767#[cfg_attr(test, assert_instr(vpmulhrsw))]1768pub fn _mm_maskz_mulhrs_epi16(k: __mmask8, a: __m128i, b: __m128i) -> __m128i {1769 unsafe {1770 let mul = _mm_mulhrs_epi16(a, b).as_i16x8();1771 transmute(simd_select_bitmask(k, mul, i16x8::ZERO))1772 }1773}17741775/// Multiply the packed 16-bit integers in a and b, producing intermediate 32-bit integers, and store the low 16 bits of the intermediate integers in dst.1776///1777/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mullo_epi16&expand=3996)1778#[inline]1779#[target_feature(enable = "avx512bw")]1780#[stable(feature = 
"stdarch_x86_avx512", since = "1.89")]1781#[cfg_attr(test, assert_instr(vpmullw))]1782#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]1783pub const fn _mm512_mullo_epi16(a: __m512i, b: __m512i) -> __m512i {1784 unsafe { transmute(simd_mul(a.as_i16x32(), b.as_i16x32())) }1785}17861787/// Multiply the packed 16-bit integers in a and b, producing intermediate 32-bit integers, and store the low 16 bits of the intermediate integers in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).1788///1789/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_mullo_epi16&expand=3994)1790#[inline]1791#[target_feature(enable = "avx512bw")]1792#[stable(feature = "stdarch_x86_avx512", since = "1.89")]1793#[cfg_attr(test, assert_instr(vpmullw))]1794#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]1795pub const fn _mm512_mask_mullo_epi16(1796 src: __m512i,1797 k: __mmask32,1798 a: __m512i,1799 b: __m512i,1800) -> __m512i {1801 unsafe {1802 let mul = _mm512_mullo_epi16(a, b).as_i16x32();1803 transmute(simd_select_bitmask(k, mul, src.as_i16x32()))1804 }1805}18061807/// Multiply the packed 16-bit integers in a and b, producing intermediate 32-bit integers, and store the low 16 bits of the intermediate integers in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).1808///1809/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_mullo_epi16&expand=3995)1810#[inline]1811#[target_feature(enable = "avx512bw")]1812#[stable(feature = "stdarch_x86_avx512", since = "1.89")]1813#[cfg_attr(test, assert_instr(vpmullw))]1814#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]1815pub const fn _mm512_maskz_mullo_epi16(k: __mmask32, a: __m512i, b: __m512i) -> __m512i {1816 unsafe {1817 let mul = _mm512_mullo_epi16(a, 
b).as_i16x32();1818 transmute(simd_select_bitmask(k, mul, i16x32::ZERO))1819 }1820}18211822/// Multiply the packed 16-bit integers in a and b, producing intermediate 32-bit integers, and store the low 16 bits of the intermediate integers in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).1823///1824/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_mullo_epi16&expand=3991)1825#[inline]1826#[target_feature(enable = "avx512bw,avx512vl")]1827#[stable(feature = "stdarch_x86_avx512", since = "1.89")]1828#[cfg_attr(test, assert_instr(vpmullw))]1829#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]1830pub const fn _mm256_mask_mullo_epi16(1831 src: __m256i,1832 k: __mmask16,1833 a: __m256i,1834 b: __m256i,1835) -> __m256i {1836 unsafe {1837 let mul = _mm256_mullo_epi16(a, b).as_i16x16();1838 transmute(simd_select_bitmask(k, mul, src.as_i16x16()))1839 }1840}18411842/// Multiply the packed 16-bit integers in a and b, producing intermediate 32-bit integers, and store the low 16 bits of the intermediate integers in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).1843///1844/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_mullo_epi16&expand=3992)1845#[inline]1846#[target_feature(enable = "avx512bw,avx512vl")]1847#[stable(feature = "stdarch_x86_avx512", since = "1.89")]1848#[cfg_attr(test, assert_instr(vpmullw))]1849#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]1850pub const fn _mm256_maskz_mullo_epi16(k: __mmask16, a: __m256i, b: __m256i) -> __m256i {1851 unsafe {1852 let mul = _mm256_mullo_epi16(a, b).as_i16x16();1853 transmute(simd_select_bitmask(k, mul, i16x16::ZERO))1854 }1855}18561857/// Multiply the packed 16-bit integers in a and b, producing intermediate 32-bit integers, and store the low 16 bits of 
the intermediate integers in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).1858///1859/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_mullo_epi16&expand=3988)1860#[inline]1861#[target_feature(enable = "avx512bw,avx512vl")]1862#[stable(feature = "stdarch_x86_avx512", since = "1.89")]1863#[cfg_attr(test, assert_instr(vpmullw))]1864#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]1865pub const fn _mm_mask_mullo_epi16(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> __m128i {1866 unsafe {1867 let mul = _mm_mullo_epi16(a, b).as_i16x8();1868 transmute(simd_select_bitmask(k, mul, src.as_i16x8()))1869 }1870}18711872/// Multiply the packed 16-bit integers in a and b, producing intermediate 32-bit integers, and store the low 16 bits of the intermediate integers in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).1873///1874/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_mullo_epi16&expand=3989)1875#[inline]1876#[target_feature(enable = "avx512bw,avx512vl")]1877#[stable(feature = "stdarch_x86_avx512", since = "1.89")]1878#[cfg_attr(test, assert_instr(vpmullw))]1879#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]1880pub const fn _mm_maskz_mullo_epi16(k: __mmask8, a: __m128i, b: __m128i) -> __m128i {1881 unsafe {1882 let mul = _mm_mullo_epi16(a, b).as_i16x8();1883 transmute(simd_select_bitmask(k, mul, i16x8::ZERO))1884 }1885}18861887/// Compare packed unsigned 16-bit integers in a and b, and store packed maximum values in dst.1888///1889/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_max_epu16&expand=3609)1890#[inline]1891#[target_feature(enable = "avx512bw")]1892#[stable(feature = "stdarch_x86_avx512", since = "1.89")]1893#[cfg_attr(test, 
assert_instr(vpmaxuw))]1894#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]1895pub const fn _mm512_max_epu16(a: __m512i, b: __m512i) -> __m512i {1896 unsafe { simd_imax(a.as_u16x32(), b.as_u16x32()).as_m512i() }1897}18981899/// Compare packed unsigned 16-bit integers in a and b, and store packed maximum values in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).1900///1901/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_max_epu16&expand=3607)1902#[inline]1903#[target_feature(enable = "avx512bw")]1904#[stable(feature = "stdarch_x86_avx512", since = "1.89")]1905#[cfg_attr(test, assert_instr(vpmaxuw))]1906#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]1907pub const fn _mm512_mask_max_epu16(src: __m512i, k: __mmask32, a: __m512i, b: __m512i) -> __m512i {1908 unsafe {1909 let max = _mm512_max_epu16(a, b).as_u16x32();1910 transmute(simd_select_bitmask(k, max, src.as_u16x32()))1911 }1912}19131914/// Compare packed unsigned 16-bit integers in a and b, and store packed maximum values in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).1915///1916/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_max_epu16&expand=3608)1917#[inline]1918#[target_feature(enable = "avx512bw")]1919#[stable(feature = "stdarch_x86_avx512", since = "1.89")]1920#[cfg_attr(test, assert_instr(vpmaxuw))]1921#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]1922pub const fn _mm512_maskz_max_epu16(k: __mmask32, a: __m512i, b: __m512i) -> __m512i {1923 unsafe {1924 let max = _mm512_max_epu16(a, b).as_u16x32();1925 transmute(simd_select_bitmask(k, max, u16x32::ZERO))1926 }1927}19281929/// Compare packed unsigned 16-bit integers in a and b, and store packed maximum values in dst using writemask k (elements are copied 
from src when the corresponding mask bit is not set).1930///1931/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_max_epu16&expand=3604)1932#[inline]1933#[target_feature(enable = "avx512bw,avx512vl")]1934#[stable(feature = "stdarch_x86_avx512", since = "1.89")]1935#[cfg_attr(test, assert_instr(vpmaxuw))]1936#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]1937pub const fn _mm256_mask_max_epu16(src: __m256i, k: __mmask16, a: __m256i, b: __m256i) -> __m256i {1938 unsafe {1939 let max = _mm256_max_epu16(a, b).as_u16x16();1940 transmute(simd_select_bitmask(k, max, src.as_u16x16()))1941 }1942}19431944/// Compare packed unsigned 16-bit integers in a and b, and store packed maximum values in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).1945///1946/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_max_epu16&expand=3605)1947#[inline]1948#[target_feature(enable = "avx512bw,avx512vl")]1949#[stable(feature = "stdarch_x86_avx512", since = "1.89")]1950#[cfg_attr(test, assert_instr(vpmaxuw))]1951#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]1952pub const fn _mm256_maskz_max_epu16(k: __mmask16, a: __m256i, b: __m256i) -> __m256i {1953 unsafe {1954 let max = _mm256_max_epu16(a, b).as_u16x16();1955 transmute(simd_select_bitmask(k, max, u16x16::ZERO))1956 }1957}19581959/// Compare packed unsigned 16-bit integers in a and b, and store packed maximum values in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).1960///1961/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_max_epu16&expand=3601)1962#[inline]1963#[target_feature(enable = "avx512bw,avx512vl")]1964#[stable(feature = "stdarch_x86_avx512", since = "1.89")]1965#[cfg_attr(test, 
assert_instr(vpmaxuw))]1966#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]1967pub const fn _mm_mask_max_epu16(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> __m128i {1968 unsafe {1969 let max = _mm_max_epu16(a, b).as_u16x8();1970 transmute(simd_select_bitmask(k, max, src.as_u16x8()))1971 }1972}19731974/// Compare packed unsigned 16-bit integers in a and b, and store packed maximum values in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).1975///1976/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_max_epu16&expand=3602)1977#[inline]1978#[target_feature(enable = "avx512bw,avx512vl")]1979#[stable(feature = "stdarch_x86_avx512", since = "1.89")]1980#[cfg_attr(test, assert_instr(vpmaxuw))]1981#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]1982pub const fn _mm_maskz_max_epu16(k: __mmask8, a: __m128i, b: __m128i) -> __m128i {1983 unsafe {1984 let max = _mm_max_epu16(a, b).as_u16x8();1985 transmute(simd_select_bitmask(k, max, u16x8::ZERO))1986 }1987}19881989/// Compare packed unsigned 8-bit integers in a and b, and store packed maximum values in dst.1990///1991/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_max_epu8&expand=3636)1992#[inline]1993#[target_feature(enable = "avx512bw")]1994#[stable(feature = "stdarch_x86_avx512", since = "1.89")]1995#[cfg_attr(test, assert_instr(vpmaxub))]1996#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]1997pub const fn _mm512_max_epu8(a: __m512i, b: __m512i) -> __m512i {1998 unsafe { simd_imax(a.as_u8x64(), b.as_u8x64()).as_m512i() }1999}
Findings
✓ No findings reported for this file.