// Reviewer note: this file uses `unsafe` extensively, which bypasses Rust's safety
// guarantees. Each use below is for FFI-style SIMD intrinsics, inline asm, or
// transmutes; keep every `unsafe` block minimal in scope and justified.
unsafe {
1#![feature(core_intrinsics, coroutines, coroutine_trait, repr_simd, tuple_trait, unboxed_closures)]2#![allow(internal_features)]34#[cfg(target_arch = "x86_64")]5use std::arch::asm;6#[cfg(target_arch = "x86_64")]7use std::arch::x86_64::*;8use std::hint::black_box;9use std::io::Write;10use std::ops::Coroutine;1112fn main() {13 println!("{:?}", std::env::args().collect::<Vec<_>>());1415 let mutex = std::sync::Mutex::new(());16 let _guard = mutex.lock().unwrap();1718 let _ = ::std::iter::repeat('a' as u8).take(10).collect::<Vec<_>>();19 let stderr = ::std::io::stderr();20 let mut stderr = stderr.lock();2122 std::thread::spawn(move || {23 println!("Hello from another thread!");24 });2526 writeln!(stderr, "some {} text", "<unknown>").unwrap();2728 let _ = std::process::Command::new("true").env("c", "d").spawn();2930 println!("cargo:rustc-link-lib=z");3132 static ONCE: std::sync::Once = std::sync::Once::new();33 ONCE.call_once(|| {});3435 let _eq = LoopState::Continue(()) == LoopState::Break(());3637 // Make sure ByValPair values with differently sized components are correctly passed38 map(None::<(u8, Box<Instruction>)>);3940 println!("{}", 2.3f32.exp());41 println!("{}", 2.3f32.exp2());42 println!("{}", 2.3f32.abs());43 println!("{}", 2.3f32.sqrt());44 println!("{}", 2.3f32.floor());45 println!("{}", 2.3f32.ceil());46 println!("{}", 2.3f32.min(1.0));47 println!("{}", 2.3f32.max(1.0));48 println!("{}", 2.3f32.powi(2));49 println!("{}", 2.3f32.log2());50 assert_eq!(2.3f32.copysign(-1.0), -2.3f32);51 println!("{}", 2.3f32.powf(2.0));5253 assert_eq!(i64::MAX.checked_mul(2), None);5455 assert_eq!(-128i8, (-128i8).saturating_sub(1));56 assert_eq!(127i8, 127i8.saturating_sub(-128));57 assert_eq!(-128i8, (-128i8).saturating_add(-128));58 assert_eq!(127i8, 127i8.saturating_add(1));5960 assert_eq!(0b0000000000000000000000000010000010000000000000000000000000000000_0000000000100000000000000000000000001000000000000100000000000000u128.leading_zeros(), 26);61 
assert_eq!(0b0000000000000000000000000010000000000000000000000000000000000000_0000000000000000000000000000000000001000000000000000000010000000u128.trailing_zeros(), 7);62 assert_eq!(63 core::intrinsics::saturating_sub(0, -170141183460469231731687303715884105728i128),64 170141183460469231731687303715884105727i12865 );6667 std::hint::black_box(std::hint::black_box(7571400400375753350092698930310845914i128) * 10);68 assert!(0i128.checked_div(2i128).is_some());69 assert!(0u128.checked_div(2u128).is_some());70 assert_eq!(1u128 + 2, 3);7172 assert_eq!(0b100010000000000000000000000000000u128 >> 10, 0b10001000000000000000000u128);73 assert_eq!(0xFEDCBA987654321123456789ABCDEFu128 >> 64, 0xFEDCBA98765432u128);74 assert_eq!(0xFEDCBA987654321123456789ABCDEFu128 as i128 >> 64, 0xFEDCBA98765432i128);7576 let tmp = 353985398u128;77 assert_eq!(tmp * 932490u128, 330087843781020u128);7879 let tmp = -0x1234_5678_9ABC_DEF0i64;80 assert_eq!(tmp as i128, -0x1234_5678_9ABC_DEF0i128);8182 // Check that all u/i128 <-> float casts work correctly.83 let houndred_u128 = 100u128;84 let houndred_i128 = 100i128;85 let houndred_f32 = 100.0f32;86 let houndred_f64 = 100.0f64;87 assert_eq!(houndred_u128 as f32, 100.0);88 assert_eq!(houndred_u128 as f64, 100.0);89 assert_eq!(houndred_f32 as u128, 100);90 assert_eq!(houndred_f64 as u128, 100);91 assert_eq!(houndred_i128 as f32, 100.0);92 assert_eq!(houndred_i128 as f64, 100.0);93 assert_eq!(houndred_f32 as i128, 100);94 assert_eq!(houndred_f64 as i128, 100);95 assert_eq!(1u128.rotate_left(2), 4);9697 assert_eq!(black_box(f32::NAN) as i128, 0);98 assert_eq!(black_box(f32::NAN) as u128, 0);99100 // Test signed 128bit comparing101 let max = usize::MAX as i128;102 if 100i128 < 0i128 || 100i128 > max {103 panic!();104 }105106 test_checked_mul();107108 let _a = 1u32 << 2u8;109110 let empty: [i32; 0] = [];111 assert!(empty.is_sorted());112113 println!("{:?}", std::intrinsics::caller_location());114115 #[cfg(target_arch = "x86_64")]116 unsafe {117 
test_simd();118 }119120 Box::pin(121 #[coroutine]122 move |mut _task_context| {123 yield ();124 },125 )126 .as_mut()127 .resume(0);128129 #[derive(Copy, Clone)]130 enum Nums {131 NegOne = -1,132 }133134 let kind = Nums::NegOne;135 assert_eq!(-1i128, kind as i128);136137 let options = [1u128];138 match options[0] {139 1 => (),140 0 => loop {},141 v => panic(v),142 };143144 if black_box(false) {145 // Based on https://github.com/rust-lang/rust/blob/2f320a224e827b400be25966755a621779f797cc/src/test/ui/debuginfo/debuginfo_with_uninhabitable_field_and_unsized.rs146 let _ = Foo::<dyn Send>::new();147148 #[allow(dead_code)]149 struct Foo<T: ?Sized> {150 base: Never,151 value: T,152 }153154 impl<T: ?Sized> Foo<T> {155 pub fn new() -> Box<Foo<T>> {156 todo!()157 }158 }159160 enum Never {}161 }162163 #[cfg(not(target_arch = "s390x"))] // s390x doesn't have vector instructions enabled by default164 foo(I64X2([0, 0]));165166 transmute_wide_pointer();167168 rust_call_abi();169170 // #[cfg(target_arch = "x86_64")]171 // inline_asm_call_custom_abi();172173 const fn no_str() -> Option<Box<str>> {174 None175 }176177 static STATIC_WITH_MAYBE_NESTED_BOX: &Option<Box<str>> = &no_str();178179 println!("{:?}", STATIC_WITH_MAYBE_NESTED_BOX);180}181182fn panic(_: u128) {183 panic!();184}185186use std::mem::transmute;187188#[cfg(target_pointer_width = "32")]189type TwoPtrs = i64;190#[cfg(target_pointer_width = "64")]191type TwoPtrs = i128;192193fn transmute_wide_pointer() -> TwoPtrs {194 unsafe { transmute::<_, TwoPtrs>("true !") }195}196197extern "rust-call" fn rust_call_abi_callee<T: std::marker::Tuple>(_: T) {}198199fn rust_call_abi() {200 rust_call_abi_callee(());201 rust_call_abi_callee((1, 2));202}203204#[cfg_attr(target_arch = "s390x", allow(dead_code))]205#[repr(simd)]206struct I64X2([i64; 2]);207208#[cfg_attr(target_arch = "s390x", allow(dead_code))]209#[allow(improper_ctypes_definitions)]210extern "C" fn foo(_a: I64X2) {}211212#[cfg(target_arch = 
"x86_64")]213#[target_feature(enable = "sse4.2")]214#[cfg(not(jit))]215unsafe fn test_crc32() {216 assert!(is_x86_feature_detected!("sse4.2"));217218 let a = 42u32;219 let b = 0xdeadbeefu64;220221 assert_eq!(_mm_crc32_u8(a, b as u8), 4135334616);222 assert_eq!(_mm_crc32_u16(a, b as u16), 1200687288);223 assert_eq!(_mm_crc32_u32(a, b as u32), 2543798776);224 assert_eq!(_mm_crc32_u64(a as u64, b as u64), 241952147);225}226227#[cfg(target_arch = "x86_64")]228#[target_feature(enable = "sse2")]229unsafe fn test_simd() {230 unsafe {231 assert!(is_x86_feature_detected!("sse2"));232233 let x = _mm_setzero_si128();234 let y = _mm_set1_epi16(7);235 let or = _mm_or_si128(x, y);236 let cmp_eq = _mm_cmpeq_epi8(y, y);237 let cmp_lt = _mm_cmplt_epi8(y, y);238239 let (zero0, zero1) = std::mem::transmute::<_, (u64, u64)>(x);240 assert_eq!((zero0, zero1), (0, 0));241 assert_eq!(std::mem::transmute::<_, [u16; 8]>(or), [7, 7, 7, 7, 7, 7, 7, 7]);242 assert_eq!(243 std::mem::transmute::<_, [u16; 8]>(cmp_eq),244 [0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff]245 );246 assert_eq!(std::mem::transmute::<_, [u16; 8]>(cmp_lt), [0, 0, 0, 0, 0, 0, 0, 0]);247248 test_mm_slli_si128();249 test_mm_movemask_epi8();250 test_mm256_movemask_epi8();251 test_mm_add_epi8();252 test_mm_add_pd();253 test_mm_cvtepi8_epi16();254 #[cfg(not(jit))]255 test_mm_cvtps_epi32();256 test_mm_cvttps_epi32();257 test_mm_cvtsi128_si64();258259 #[cfg(not(jit))]260 test_mm_cvtps_ph();261262 test_mm_extract_epi8();263 test_mm_insert_epi16();264 test_mm_shuffle_epi8();265266 #[cfg(not(jit))]267 test_mm_cmpestri();268269 test_mm256_shuffle_epi8();270 test_mm256_permute2x128_si256();271 test_mm256_permutevar8x32_epi32();272273 #[rustfmt::skip]274 let mask1 = _mm_movemask_epi8(dbg!(_mm_setr_epi8(255u8 as i8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)));275 assert_eq!(mask1, 1);276277 #[cfg(not(jit))]278 test_crc32();279280 #[cfg(not(jit))]281 test_xmm_roundtrip();282 #[cfg(not(jit))]283 if 
is_x86_feature_detected!("avx") {284 test_ymm_roundtrip();285 }286 #[cfg(not(jit))]287 if is_x86_feature_detected!("avx512f") {288 test_zmm_roundtrip();289 }290 }291}292293#[cfg(target_arch = "x86_64")]294#[target_feature(enable = "sse2")]295unsafe fn test_mm_slli_si128() {296 #[rustfmt::skip]297 let a = _mm_setr_epi8(298 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,299 );300 let r = _mm_slli_si128(a, 1);301 let e = _mm_setr_epi8(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);302 assert_eq_m128i(r, e);303304 #[rustfmt::skip]305 let a = _mm_setr_epi8(306 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,307 );308 let r = _mm_slli_si128(a, 15);309 let e = _mm_setr_epi8(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1);310 assert_eq_m128i(r, e);311312 #[rustfmt::skip]313 let a = _mm_setr_epi8(314 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,315 );316 let r = _mm_slli_si128(a, 16);317 assert_eq_m128i(r, _mm_set1_epi8(0));318}319320#[cfg(target_arch = "x86_64")]321#[target_feature(enable = "sse2")]322unsafe fn test_mm_movemask_epi8() {323 #[rustfmt::skip]324 let a = _mm_setr_epi8(325 0b1000_0000u8 as i8, 0b0, 0b1000_0000u8 as i8, 0b01,326 0b0101, 0b1111_0000u8 as i8, 0, 0,327 0, 0, 0b1111_0000u8 as i8, 0b0101,328 0b01, 0b1000_0000u8 as i8, 0b0, 0b1000_0000u8 as i8,329 );330 let r = _mm_movemask_epi8(a);331 assert_eq!(r, 0b10100100_00100101);332}333334#[cfg(target_arch = "x86_64")]335#[target_feature(enable = "avx2")]336unsafe fn test_mm256_movemask_epi8() {337 let a = _mm256_set1_epi8(-1);338 let r = _mm256_movemask_epi8(a);339 let e = -1;340 assert_eq!(r, e);341}342343#[cfg(target_arch = "x86_64")]344#[target_feature(enable = "sse2")]345unsafe fn test_mm_add_epi8() {346 let a = _mm_setr_epi8(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);347 #[rustfmt::skip]348 let b = _mm_setr_epi8(349 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,350 );351 let r = _mm_add_epi8(a, b);352 #[rustfmt::skip]353 let e = _mm_setr_epi8(354 
16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46,355 );356 assert_eq_m128i(r, e);357}358359#[cfg(target_arch = "x86_64")]360#[target_feature(enable = "sse2")]361unsafe fn test_mm_add_pd() {362 let a = _mm_setr_pd(1.0, 2.0);363 let b = _mm_setr_pd(5.0, 10.0);364 let r = _mm_add_pd(a, b);365 assert_eq_m128d(r, _mm_setr_pd(6.0, 12.0));366}367368#[cfg(target_arch = "x86_64")]369fn assert_eq_m128i(x: std::arch::x86_64::__m128i, y: std::arch::x86_64::__m128i) {370 unsafe {371 assert_eq!(std::mem::transmute::<_, [u8; 16]>(x), std::mem::transmute::<_, [u8; 16]>(y));372 }373}374375#[cfg(target_arch = "x86_64")]376#[target_feature(enable = "sse2")]377pub fn assert_eq_m128d(a: __m128d, b: __m128d) {378 if _mm_movemask_pd(_mm_cmpeq_pd(a, b)) != 0b11 {379 panic!("{:?} != {:?}", a, b);380 }381}382383#[cfg(target_arch = "x86_64")]384#[target_feature(enable = "avx")]385pub fn assert_eq_m256i(a: __m256i, b: __m256i) {386 unsafe {387 assert_eq!(std::mem::transmute::<_, [u64; 4]>(a), std::mem::transmute::<_, [u64; 4]>(b))388 }389}390391#[cfg(target_arch = "x86_64")]392#[target_feature(enable = "sse2")]393unsafe fn test_mm_cvtsi128_si64() {394 unsafe {395 let r = _mm_cvtsi128_si64(std::mem::transmute::<[i64; 2], _>([5, 0]));396 assert_eq!(r, 5);397 }398}399400#[cfg(target_arch = "x86_64")]401#[target_feature(enable = "sse4.1")]402unsafe fn test_mm_cvtepi8_epi16() {403 let a = _mm_set1_epi8(10);404 let r = _mm_cvtepi8_epi16(a);405 let e = _mm_set1_epi16(10);406 assert_eq_m128i(r, e);407 let a = _mm_set1_epi8(-10);408 let r = _mm_cvtepi8_epi16(a);409 let e = _mm_set1_epi16(-10);410 assert_eq_m128i(r, e);411}412413#[cfg(target_arch = "x86_64")]414#[target_feature(enable = "sse4.1")]415unsafe fn test_mm_extract_epi8() {416 #[rustfmt::skip]417 let a = _mm_setr_epi8(418 -1, 1, 2, 3, 4, 5, 6, 7,419 8, 9, 10, 11, 12, 13, 14, 15420 );421 let r1 = _mm_extract_epi8(a, 0);422 let r2 = _mm_extract_epi8(a, 3);423 assert_eq!(r1, 0xFF);424 assert_eq!(r2, 3);425}426427#[cfg(target_arch = 
"x86_64")]428#[target_feature(enable = "sse2")]429unsafe fn test_mm_insert_epi16() {430 let a = _mm_setr_epi16(0, 1, 2, 3, 4, 5, 6, 7);431 let r = _mm_insert_epi16(a, 9, 0);432 let e = _mm_setr_epi16(9, 1, 2, 3, 4, 5, 6, 7);433 assert_eq_m128i(r, e);434}435436#[cfg(target_arch = "x86_64")]437#[target_feature(enable = "ssse3")]438unsafe fn test_mm_shuffle_epi8() {439 #[rustfmt::skip]440 let a = _mm_setr_epi8(441 1, 2, 3, 4, 5, 6, 7, 8,442 9, 10, 11, 12, 13, 14, 15, 16,443 );444 #[rustfmt::skip]445 let b = _mm_setr_epi8(446 4, 128_u8 as i8, 4, 3,447 24, 12, 6, 19,448 12, 5, 5, 10,449 4, 1, 8, 0,450 );451 let expected = _mm_setr_epi8(5, 0, 5, 4, 9, 13, 7, 4, 13, 6, 6, 11, 5, 2, 9, 1);452 let r = _mm_shuffle_epi8(a, b);453 assert_eq_m128i(r, expected);454}455456// Currently one cannot `load` a &[u8] that is less than 16457// in length. This makes loading strings less than 16 in length458// a bit difficult. Rather than `load` and mutate the __m128i,459// it is easier to memcpy the given string to a local slice with460// length 16 and `load` the local slice.461#[cfg(not(jit))]462#[cfg(target_arch = "x86_64")]463#[target_feature(enable = "sse4.2")]464unsafe fn str_to_m128i(s: &[u8]) -> __m128i {465 unsafe {466 assert!(s.len() <= 16);467 let slice = &mut [0u8; 16];468 std::ptr::copy_nonoverlapping(s.as_ptr(), slice.as_mut_ptr(), s.len());469 _mm_loadu_si128(slice.as_ptr() as *const _)470 }471}472473#[cfg(not(jit))]474#[cfg(target_arch = "x86_64")]475#[target_feature(enable = "sse4.2")]476unsafe fn test_mm_cmpestri() {477 unsafe {478 let a = str_to_m128i(b"bar - garbage");479 let b = str_to_m128i(b"foobar");480 let i = _mm_cmpestri::<_SIDD_CMP_EQUAL_ORDERED>(a, 3, b, 6);481 assert_eq!(3, i);482 }483}484485#[cfg(target_arch = "x86_64")]486#[target_feature(enable = "avx2")]487unsafe fn test_mm256_shuffle_epi8() {488 #[rustfmt::skip]489 let a = _mm256_setr_epi8(490 1, 2, 3, 4, 5, 6, 7, 8,491 9, 10, 11, 12, 13, 14, 15, 16,492 17, 18, 19, 20, 21, 22, 23, 24,493 25, 26, 27, 28, 
29, 30, 31, 32,494 );495 #[rustfmt::skip]496 let b = _mm256_setr_epi8(497 4, 128u8 as i8, 4, 3, 24, 12, 6, 19,498 12, 5, 5, 10, 4, 1, 8, 0,499 4, 128u8 as i8, 4, 3, 24, 12, 6, 19,500 12, 5, 5, 10, 4, 1, 8, 0,501 );502 #[rustfmt::skip]503 let expected = _mm256_setr_epi8(504 5, 0, 5, 4, 9, 13, 7, 4,505 13, 6, 6, 11, 5, 2, 9, 1,506 21, 0, 21, 20, 25, 29, 23, 20,507 29, 22, 22, 27, 21, 18, 25, 17,508 );509 let r = _mm256_shuffle_epi8(a, b);510 assert_eq_m256i(r, expected);511}512513#[cfg(target_arch = "x86_64")]514#[target_feature(enable = "avx2")]515unsafe fn test_mm256_permute2x128_si256() {516 let a = _mm256_setr_epi64x(100, 200, 500, 600);517 let b = _mm256_setr_epi64x(300, 400, 700, 800);518 let r = _mm256_permute2x128_si256::<0b00_01_00_11>(a, b);519 let e = _mm256_setr_epi64x(700, 800, 500, 600);520 assert_eq_m256i(r, e);521}522523#[cfg(target_arch = "x86_64")]524#[target_feature(enable = "avx2")]525unsafe fn test_mm256_permutevar8x32_epi32() {526 let a = _mm256_setr_epi32(100, 200, 300, 400, 500, 600, 700, 800);527 let idx = _mm256_setr_epi32(7, 6, 5, 4, 3, 2, 1, 0);528 let r = _mm256_setr_epi32(800, 700, 600, 500, 400, 300, 200, 100);529 let e = _mm256_permutevar8x32_epi32(a, idx);530 assert_eq_m256i(r, e);531}532533#[cfg(target_arch = "x86_64")]534#[target_feature(enable = "avx2")]535#[cfg(not(jit))]536unsafe fn test_mm_cvtps_epi32() {537 unsafe {538 let floats: [f32; 4] = [1.5, -2.5, i32::MAX as f32 + 1.0, f32::NAN];539540 let float_vec = _mm_loadu_ps(floats.as_ptr());541 let int_vec = _mm_cvtps_epi32(float_vec);542543 let mut ints: [i32; 4] = [0; 4];544 _mm_storeu_si128(ints.as_mut_ptr() as *mut __m128i, int_vec);545546 // this is very different from `floats.map(|f| f as i32)`!547 let expected_ints: [i32; 4] = [2, -2, i32::MIN, i32::MIN];548549 assert_eq!(ints, expected_ints);550 }551}552553#[cfg(target_arch = "x86_64")]554#[target_feature(enable = "avx2")]555unsafe fn test_mm_cvttps_epi32() {556 unsafe {557 let floats: [f32; 4] = [1.5, -2.5, i32::MAX as 
f32 + 1.0, f32::NAN];558559 let float_vec = _mm_loadu_ps(floats.as_ptr());560 let int_vec = _mm_cvttps_epi32(float_vec);561562 let mut ints: [i32; 4] = [0; 4];563 _mm_storeu_si128(ints.as_mut_ptr() as *mut __m128i, int_vec);564565 // this is very different from `floats.map(|f| f as i32)`!566 let expected_ints: [i32; 4] = [1, -2, i32::MIN, i32::MIN];567568 assert_eq!(ints, expected_ints);569 }570}571572#[cfg(target_arch = "x86_64")]573#[target_feature(enable = "f16c")]574#[cfg(not(jit))]575unsafe fn test_mm_cvtps_ph() {576 const F16_ONE: i16 = 0x3c00;577 const F16_TWO: i16 = 0x4000;578 const F16_THREE: i16 = 0x4200;579 const F16_FOUR: i16 = 0x4400;580581 let a = _mm_set_ps(1.0, 2.0, 3.0, 4.0);582 let r = _mm_cvtps_ph::<_MM_FROUND_CUR_DIRECTION>(a);583 let e = _mm_set_epi16(0, 0, 0, 0, F16_ONE, F16_TWO, F16_THREE, F16_FOUR);584 assert_eq_m128i(r, e);585}586587#[cfg(target_arch = "x86_64")]588#[cfg(not(jit))]589unsafe fn test_xmm_roundtrip() {590 unsafe {591 let input = [1u8; 16];592 let mut output = [0u8; 16];593594 asm!(595 "movups {xmm}, [{input}]",596 "movups [{output}], {xmm}",597 input = in(reg) input.as_ptr(),598 output = in(reg) output.as_mut_ptr(),599 xmm = out(xmm_reg) _,600 );601602 assert_eq!(input, output);603 }604}605606#[cfg(target_arch = "x86_64")]607#[target_feature(enable = "avx")]608#[cfg(not(jit))]609unsafe fn test_ymm_roundtrip() {610 unsafe {611 let input = [1u8; 32];612 let mut output = [0u8; 32];613614 asm!(615 "vmovups {ymm}, [{input}]",616 "vmovups [{output}], {ymm}",617 input = in(reg) input.as_ptr(),618 output = in(reg) output.as_mut_ptr(),619 ymm = out(ymm_reg) _,620 );621622 assert_eq!(input, output);623 }624}625626#[cfg(target_arch = "x86_64")]627#[target_feature(enable = "avx512f")]628#[cfg(not(jit))]629unsafe fn test_zmm_roundtrip() {630 unsafe {631 let input = [1u8; 64];632 let mut output = [0u8; 64];633634 asm!(635 "vmovups {zmm}, [{input}]",636 "vmovups [{output}], {zmm}",637 input = in(reg) input.as_ptr(),638 output = in(reg) 
output.as_mut_ptr(),639 zmm = out(zmm_reg) _,640 );641642 assert_eq!(input, output);643 }644}645646fn test_checked_mul() {647 let u: Option<u8> = u8::from_str_radix("1000", 10).ok();648 assert_eq!(u, None);649650 assert_eq!(1u8.checked_mul(255u8), Some(255u8));651 assert_eq!(255u8.checked_mul(255u8), None);652 assert_eq!(1i8.checked_mul(127i8), Some(127i8));653 assert_eq!(127i8.checked_mul(127i8), None);654 assert_eq!((-1i8).checked_mul(-127i8), Some(127i8));655 assert_eq!(1i8.checked_mul(-128i8), Some(-128i8));656 assert_eq!((-128i8).checked_mul(-128i8), None);657658 assert_eq!(1u64.checked_mul(u64::MAX), Some(u64::MAX));659 assert_eq!(u64::MAX.checked_mul(u64::MAX), None);660 assert_eq!(1i64.checked_mul(i64::MAX), Some(i64::MAX));661 assert_eq!(i64::MAX.checked_mul(i64::MAX), None);662 assert_eq!((-1i64).checked_mul(i64::MIN + 1), Some(i64::MAX));663 assert_eq!(1i64.checked_mul(i64::MIN), Some(i64::MIN));664 assert_eq!(i64::MIN.checked_mul(i64::MIN), None);665}666667#[derive(PartialEq)]668enum LoopState {669 Continue(()),670 Break(()),671}672673pub enum Instruction {674 Increment,675 Loop,676}677678fn map(a: Option<(u8, Box<Instruction>)>) -> Option<Box<Instruction>> {679 match a {680 None => None,681 Some((_, instr)) => Some(instr),682 }683}684685// FIXME enable once inline asm sym references are stabilized in cg_clif686// #[cfg(target_arch = "x86_64")]687// fn inline_asm_call_custom_abi() {688// use std::arch::{asm, naked_asm};689//690// #[unsafe(naked)]691// unsafe extern "custom" fn double() {692// naked_asm!("add rax, rax", "ret");693// }694//695// let mut x: u64 = 21;696// unsafe { asm!("call {}", sym double, inout("rax") x) };697// assert_eq!(x, 42);698// }
// Same data, no extra tab — call code_get_file + code_get_findings over MCP from Claude/Cursor/Copilot.