// This code is automatically generated. DO NOT MODIFY.
//
// Instead, modify `crates/stdarch-gen-arm/spec/sve` and run the following command to re-generate
// this file:
//
// ```
// cargo run --bin=stdarch-gen-arm -- crates/stdarch-gen-arm/spec
// ```
#![allow(unused)]
use super::*;
use std::boxed::Box;
use std::convert::{TryFrom, TryInto};
use std::sync::LazyLock;
use std::vec::Vec;
use stdarch_test::simd_test;
// Lazily-initialised reference data pools, one per element type.
// Each pool holds `N * 5` sequential values, where N is presumably the lane
// count of a 128-bit-granule SVE vector at maximum vector length (e.g. 64 f32
// lanes, 256 i8 lanes) — TODO confirm against the generator spec.
static F32_DATA: LazyLock<[f32; 64 * 5]> = LazyLock::new(|| {
    (0..64 * 5)
        .map(|i| i as f32)
        .collect::<Vec<_>>()
        .try_into()
        .expect("f32 data incorrectly initialised")
});
static F64_DATA: LazyLock<[f64; 32 * 5]> = LazyLock::new(|| {
    (0..32 * 5)
        .map(|i| i as f64)
        .collect::<Vec<_>>()
        .try_into()
        .expect("f64 data incorrectly initialised")
});
// i8 values are wrapped into [-128, 127] so each pool entry is representable.
static I8_DATA: LazyLock<[i8; 256 * 5]> = LazyLock::new(|| {
    (0..256 * 5)
        .map(|i| ((i + 128) % 256 - 128) as i8)
        .collect::<Vec<_>>()
        .try_into()
        .expect("i8 data incorrectly initialised")
});
static I16_DATA: LazyLock<[i16; 128 * 5]> = LazyLock::new(|| {
    (0..128 * 5)
        .map(|i| i as i16)
        .collect::<Vec<_>>()
        .try_into()
        .expect("i16 data incorrectly initialised")
});
static I32_DATA: LazyLock<[i32; 64 * 5]> = LazyLock::new(|| {
    (0..64 * 5)
        .map(|i| i as i32)
        .collect::<Vec<_>>()
        .try_into()
        .expect("i32 data incorrectly initialised")
});
static I64_DATA: LazyLock<[i64; 32 * 5]> = LazyLock::new(|| {
    (0..32 * 5)
        .map(|i| i as i64)
        .collect::<Vec<_>>()
        .try_into()
        .expect("i64 data incorrectly initialised")
});
// Unsigned pools rely on `as` wrapping for the narrow types (u8/u16).
static U8_DATA: LazyLock<[u8; 256 * 5]> = LazyLock::new(|| {
    (0..256 * 5)
        .map(|i| i as u8)
        .collect::<Vec<_>>()
        .try_into()
        .expect("u8 data incorrectly initialised")
});
static U16_DATA: LazyLock<[u16; 128 * 5]> = LazyLock::new(|| {
    (0..128 * 5)
        .map(|i| i as u16)
        .collect::<Vec<_>>()
        .try_into()
        .expect("u16 data incorrectly initialised")
});
static U32_DATA: LazyLock<[u32; 64 * 5]> = LazyLock::new(|| {
    (0..64 * 5)
        .map(|i| i as u32)
        .collect::<Vec<_>>()
        .try_into()
        .expect("u32 data incorrectly initialised")
});
static U64_DATA: LazyLock<[u64; 32 * 5]> = LazyLock::new(|| {
    (0..32 * 5)
        .map(|i| i as u64)
        .collect::<Vec<_>>()
        .try_into()
        .expect("u64 data incorrectly initialised")
});
// Helpers asserting that `vector` equals `expected` on the lanes currently
// marked valid by the FFR (first-fault register). Each helper:
//   1. reads the FFR via `svrdffr()`,
//   2. requires at least the first lane to be valid (`svptest_first`),
//   3. compares lane-wise with `svcmpne_*` and requires no mismatching lane.
// One monomorphic helper per element type, since the intrinsics are not
// generic over element width.
#[target_feature(enable = "sve")]
fn assert_vector_matches_f32(vector: svfloat32_t, expected: svfloat32_t) {
    let defined = svrdffr();
    assert!(svptest_first(svptrue_b32(), defined));
    let cmp = svcmpne_f32(defined, vector, expected);
    assert!(!svptest_any(defined, cmp))
}
#[target_feature(enable = "sve")]
fn assert_vector_matches_f64(vector: svfloat64_t, expected: svfloat64_t) {
    let defined = svrdffr();
    assert!(svptest_first(svptrue_b64(), defined));
    let cmp = svcmpne_f64(defined, vector, expected);
    assert!(!svptest_any(defined, cmp))
}
#[target_feature(enable = "sve")]
fn assert_vector_matches_i8(vector: svint8_t, expected: svint8_t) {
    let defined = svrdffr();
    assert!(svptest_first(svptrue_b8(), defined));
    let cmp = svcmpne_s8(defined, vector, expected);
    assert!(!svptest_any(defined, cmp))
}
#[target_feature(enable = "sve")]
fn assert_vector_matches_i16(vector: svint16_t, expected: svint16_t) {
    let defined = svrdffr();
    assert!(svptest_first(svptrue_b16(), defined));
    let cmp = svcmpne_s16(defined, vector, expected);
    assert!(!svptest_any(defined, cmp))
}
#[target_feature(enable = "sve")]
fn assert_vector_matches_i32(vector: svint32_t, expected: svint32_t) {
    let defined = svrdffr();
    assert!(svptest_first(svptrue_b32(), defined));
    let cmp = svcmpne_s32(defined, vector, expected);
    assert!(!svptest_any(defined, cmp))
}
#[target_feature(enable = "sve")]
fn assert_vector_matches_i64(vector: svint64_t, expected: svint64_t) {
    let defined = svrdffr();
    assert!(svptest_first(svptrue_b64(), defined));
    let cmp = svcmpne_s64(defined, vector, expected);
    assert!(!svptest_any(defined,
cmp))127}128#[target_feature(enable = "sve")]129fn assert_vector_matches_u8(vector: svuint8_t, expected: svuint8_t) {130 let defined = svrdffr();131 assert!(svptest_first(svptrue_b8(), defined));132 let cmp = svcmpne_u8(defined, vector, expected);133 assert!(!svptest_any(defined, cmp))134}135#[target_feature(enable = "sve")]136fn assert_vector_matches_u16(vector: svuint16_t, expected: svuint16_t) {137 let defined = svrdffr();138 assert!(svptest_first(svptrue_b16(), defined));139 let cmp = svcmpne_u16(defined, vector, expected);140 assert!(!svptest_any(defined, cmp))141}142#[target_feature(enable = "sve")]143fn assert_vector_matches_u32(vector: svuint32_t, expected: svuint32_t) {144 let defined = svrdffr();145 assert!(svptest_first(svptrue_b32(), defined));146 let cmp = svcmpne_u32(defined, vector, expected);147 assert!(!svptest_any(defined, cmp))148}149#[target_feature(enable = "sve")]150fn assert_vector_matches_u64(vector: svuint64_t, expected: svuint64_t) {151 let defined = svrdffr();152 assert!(svptest_first(svptrue_b64(), defined));153 let cmp = svcmpne_u64(defined, vector, expected);154 assert!(!svptest_any(defined, cmp))155}156#[simd_test(enable = "sve")]157unsafe fn test_svld1_f32_with_svst1_f32() {158 let mut storage = [0 as f32; 320usize];159 let data = svcvt_f32_s32_x(160 svptrue_b32(),161 svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),162 );163 svst1_f32(svptrue_b32(), storage.as_mut_ptr(), data);164 for (i, &val) in storage.iter().enumerate() {165 assert!(val == 0 as f32 || val == i as f32);166 }167 svsetffr();168 let loaded = svld1_f32(svptrue_b32(), storage.as_ptr() as *const f32);169 assert_vector_matches_f32(170 loaded,171 svcvt_f32_s32_x(172 svptrue_b32(),173 svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),174 ),175 );176}177#[simd_test(enable = "sve")]178unsafe fn test_svld1_f64_with_svst1_f64() {179 let mut storage = [0 as f64; 160usize];180 let data = svcvt_f64_s64_x(181 svptrue_b64(),182 
        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
    );
    svst1_f64(svptrue_b64(), storage.as_mut_ptr(), data);
    for (i, &val) in storage.iter().enumerate() {
        assert!(val == 0 as f64 || val == i as f64);
    }
    svsetffr();
    let loaded = svld1_f64(svptrue_b64(), storage.as_ptr() as *const f64);
    assert_vector_matches_f64(
        loaded,
        svcvt_f64_s64_x(
            svptrue_b64(),
            svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
        ),
    );
}
// Integer round-trips: same pattern as the float tests, but the index ramp
// needs no conversion step.
#[simd_test(enable = "sve")]
unsafe fn test_svld1_s8_with_svst1_s8() {
    let mut storage = [0 as i8; 1280usize];
    let data = svindex_s8((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
    svst1_s8(svptrue_b8(), storage.as_mut_ptr(), data);
    for (i, &val) in storage.iter().enumerate() {
        // NOTE: `i as i8` wraps, matching svindex_s8's modular lane values.
        assert!(val == 0 as i8 || val == i as i8);
    }
    svsetffr();
    let loaded = svld1_s8(svptrue_b8(), storage.as_ptr() as *const i8);
    assert_vector_matches_i8(
        loaded,
        svindex_s8((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
    );
}
#[simd_test(enable = "sve")]
unsafe fn test_svld1_s16_with_svst1_s16() {
    let mut storage = [0 as i16; 640usize];
    let data = svindex_s16((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
    svst1_s16(svptrue_b16(), storage.as_mut_ptr(), data);
    for (i, &val) in storage.iter().enumerate() {
        assert!(val == 0 as i16 || val == i as i16);
    }
    svsetffr();
    let loaded = svld1_s16(svptrue_b16(), storage.as_ptr() as *const i16);
    assert_vector_matches_i16(
        loaded,
        svindex_s16((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
    );
}
#[simd_test(enable = "sve")]
unsafe fn test_svld1_s32_with_svst1_s32() {
    let mut storage = [0 as i32; 320usize];
    let data = svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
    svst1_s32(svptrue_b32(), storage.as_mut_ptr(), data);
    for (i, &val) in storage.iter().enumerate() {
        assert!(val == 0 as i32 || val == i as i32);
    }
    svsetffr();
    let loaded = svld1_s32(svptrue_b32(), storage.as_ptr() as *const i32);
    assert_vector_matches_i32(
        loaded,
        svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
    );
}
#[simd_test(enable = "sve")]
unsafe fn test_svld1_s64_with_svst1_s64() {
    let mut storage = [0 as i64; 160usize];
    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
    svst1_s64(svptrue_b64(), storage.as_mut_ptr(), data);
    for (i, &val) in storage.iter().enumerate() {
        assert!(val == 0 as i64 || val == i as i64);
    }
    svsetffr();
    let loaded = svld1_s64(svptrue_b64(), storage.as_ptr() as *const i64);
    assert_vector_matches_i64(
        loaded,
        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
    );
}
#[simd_test(enable = "sve")]
unsafe fn test_svld1_u8_with_svst1_u8() {
    let mut storage = [0 as u8; 1280usize];
    let data = svindex_u8((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
    svst1_u8(svptrue_b8(), storage.as_mut_ptr(), data);
    for (i, &val) in storage.iter().enumerate() {
        assert!(val == 0 as u8 || val == i as u8);
    }
    svsetffr();
    let loaded = svld1_u8(svptrue_b8(), storage.as_ptr() as *const u8);
    assert_vector_matches_u8(
        loaded,
        svindex_u8((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
    );
}
#[simd_test(enable = "sve")]
unsafe fn test_svld1_u16_with_svst1_u16() {
    let mut storage = [0 as u16; 640usize];
    let data = svindex_u16((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
    svst1_u16(svptrue_b16(), storage.as_mut_ptr(), data);
    for (i, &val) in storage.iter().enumerate() {
        assert!(val == 0 as u16 || val == i as u16);
    }
    svsetffr();
    let loaded = svld1_u16(svptrue_b16(), storage.as_ptr() as *const u16);
    assert_vector_matches_u16(
        loaded,
        svindex_u16((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
    );
}
#[simd_test(enable = "sve")]
unsafe fn
test_svld1_u32_with_svst1_u32() {
    let mut storage = [0 as u32; 320usize];
    let data = svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
    svst1_u32(svptrue_b32(), storage.as_mut_ptr(), data);
    for (i, &val) in storage.iter().enumerate() {
        assert!(val == 0 as u32 || val == i as u32);
    }
    svsetffr();
    let loaded = svld1_u32(svptrue_b32(), storage.as_ptr() as *const u32);
    assert_vector_matches_u32(
        loaded,
        svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
    );
}
#[simd_test(enable = "sve")]
unsafe fn test_svld1_u64_with_svst1_u64() {
    let mut storage = [0 as u64; 160usize];
    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
    svst1_u64(svptrue_b64(), storage.as_mut_ptr(), data);
    for (i, &val) in storage.iter().enumerate() {
        assert!(val == 0 as u64 || val == i as u64);
    }
    svsetffr();
    let loaded = svld1_u64(svptrue_b64(), storage.as_ptr() as *const u64);
    assert_vector_matches_u64(
        loaded,
        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
    );
}
// "index" gather/scatter round-trips: lane i is scattered to / gathered from
// element `indices[i]` of `storage` (indices are scaled by element size by
// the hardware, so an index ramp of step 1 addresses consecutive elements).
#[simd_test(enable = "sve")]
unsafe fn test_svld1_gather_s32index_f32_with_svst1_scatter_s32index_f32() {
    let mut storage = [0 as f32; 320usize];
    let data = svcvt_f32_s32_x(
        svptrue_b32(),
        svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
    );
    let indices = svindex_s32(0, 1);
    svst1_scatter_s32index_f32(svptrue_b32(), storage.as_mut_ptr(), indices, data);
    for (i, &val) in storage.iter().enumerate() {
        assert!(val == 0 as f32 || val == i as f32);
    }
    svsetffr();
    let loaded = svld1_gather_s32index_f32(svptrue_b32(), storage.as_ptr() as *const f32, indices);
    assert_vector_matches_f32(
        loaded,
        svcvt_f32_s32_x(
            svptrue_b32(),
            svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
        ),
    );
}
#[simd_test(enable = "sve")]
unsafe fn
test_svld1_gather_s32index_s32_with_svst1_scatter_s32index_s32() {
    let mut storage = [0 as i32; 320usize];
    let data = svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
    let indices = svindex_s32(0, 1);
    svst1_scatter_s32index_s32(svptrue_b32(), storage.as_mut_ptr(), indices, data);
    for (i, &val) in storage.iter().enumerate() {
        assert!(val == 0 as i32 || val == i as i32);
    }
    svsetffr();
    let loaded = svld1_gather_s32index_s32(svptrue_b32(), storage.as_ptr() as *const i32, indices);
    assert_vector_matches_i32(
        loaded,
        svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
    );
}
#[simd_test(enable = "sve")]
unsafe fn test_svld1_gather_s32index_u32_with_svst1_scatter_s32index_u32() {
    let mut storage = [0 as u32; 320usize];
    let data = svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
    let indices = svindex_s32(0, 1);
    svst1_scatter_s32index_u32(svptrue_b32(), storage.as_mut_ptr(), indices, data);
    for (i, &val) in storage.iter().enumerate() {
        assert!(val == 0 as u32 || val == i as u32);
    }
    svsetffr();
    let loaded = svld1_gather_s32index_u32(svptrue_b32(), storage.as_ptr() as *const u32, indices);
    assert_vector_matches_u32(
        loaded,
        svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
    );
}
// 64-bit-indexed variants of the same round-trip.
#[simd_test(enable = "sve")]
unsafe fn test_svld1_gather_s64index_f64_with_svst1_scatter_s64index_f64() {
    let mut storage = [0 as f64; 160usize];
    let data = svcvt_f64_s64_x(
        svptrue_b64(),
        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
    );
    let indices = svindex_s64(0, 1);
    svst1_scatter_s64index_f64(svptrue_b64(), storage.as_mut_ptr(), indices, data);
    for (i, &val) in storage.iter().enumerate() {
        assert!(val == 0 as f64 || val == i as f64);
    }
    svsetffr();
    let loaded = svld1_gather_s64index_f64(svptrue_b64(), storage.as_ptr() as *const f64, indices);
    assert_vector_matches_f64(
        loaded,
        svcvt_f64_s64_x(
            svptrue_b64(),
            svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
        ),
    );
}
#[simd_test(enable = "sve")]
unsafe fn test_svld1_gather_s64index_s64_with_svst1_scatter_s64index_s64() {
    let mut storage = [0 as i64; 160usize];
    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
    let indices = svindex_s64(0, 1);
    svst1_scatter_s64index_s64(svptrue_b64(), storage.as_mut_ptr(), indices, data);
    for (i, &val) in storage.iter().enumerate() {
        assert!(val == 0 as i64 || val == i as i64);
    }
    svsetffr();
    let loaded = svld1_gather_s64index_s64(svptrue_b64(), storage.as_ptr() as *const i64, indices);
    assert_vector_matches_i64(
        loaded,
        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
    );
}
#[simd_test(enable = "sve")]
unsafe fn test_svld1_gather_s64index_u64_with_svst1_scatter_s64index_u64() {
    let mut storage = [0 as u64; 160usize];
    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
    let indices = svindex_s64(0, 1);
    svst1_scatter_s64index_u64(svptrue_b64(), storage.as_mut_ptr(), indices, data);
    for (i, &val) in storage.iter().enumerate() {
        assert!(val == 0 as u64 || val == i as u64);
    }
    svsetffr();
    let loaded = svld1_gather_s64index_u64(svptrue_b64(), storage.as_ptr() as *const u64, indices);
    assert_vector_matches_u64(
        loaded,
        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
    );
}
// Unsigned-index variants (u32/u64 index vectors, same access pattern).
#[simd_test(enable = "sve")]
unsafe fn test_svld1_gather_u32index_f32_with_svst1_scatter_u32index_f32() {
    let mut storage = [0 as f32; 320usize];
    let data = svcvt_f32_s32_x(
        svptrue_b32(),
        svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
    );
    let indices = svindex_u32(0, 1);
    svst1_scatter_u32index_f32(svptrue_b32(), storage.as_mut_ptr(), indices, data);
    for (i, &val) in
storage.iter().enumerate() {
        assert!(val == 0 as f32 || val == i as f32);
    }
    svsetffr();
    let loaded = svld1_gather_u32index_f32(svptrue_b32(), storage.as_ptr() as *const f32, indices);
    assert_vector_matches_f32(
        loaded,
        svcvt_f32_s32_x(
            svptrue_b32(),
            svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
        ),
    );
}
#[simd_test(enable = "sve")]
unsafe fn test_svld1_gather_u32index_s32_with_svst1_scatter_u32index_s32() {
    let mut storage = [0 as i32; 320usize];
    let data = svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
    let indices = svindex_u32(0, 1);
    svst1_scatter_u32index_s32(svptrue_b32(), storage.as_mut_ptr(), indices, data);
    for (i, &val) in storage.iter().enumerate() {
        assert!(val == 0 as i32 || val == i as i32);
    }
    svsetffr();
    let loaded = svld1_gather_u32index_s32(svptrue_b32(), storage.as_ptr() as *const i32, indices);
    assert_vector_matches_i32(
        loaded,
        svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
    );
}
#[simd_test(enable = "sve")]
unsafe fn test_svld1_gather_u32index_u32_with_svst1_scatter_u32index_u32() {
    let mut storage = [0 as u32; 320usize];
    let data = svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
    let indices = svindex_u32(0, 1);
    svst1_scatter_u32index_u32(svptrue_b32(), storage.as_mut_ptr(), indices, data);
    for (i, &val) in storage.iter().enumerate() {
        assert!(val == 0 as u32 || val == i as u32);
    }
    svsetffr();
    let loaded = svld1_gather_u32index_u32(svptrue_b32(), storage.as_ptr() as *const u32, indices);
    assert_vector_matches_u32(
        loaded,
        svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
    );
}
#[simd_test(enable = "sve")]
unsafe fn test_svld1_gather_u64index_f64_with_svst1_scatter_u64index_f64() {
    let mut storage = [0 as f64; 160usize];
    let data = svcvt_f64_s64_x(
        svptrue_b64(),
        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
    );
    let indices = svindex_u64(0, 1);
    svst1_scatter_u64index_f64(svptrue_b64(), storage.as_mut_ptr(), indices, data);
    for (i, &val) in storage.iter().enumerate() {
        assert!(val == 0 as f64 || val == i as f64);
    }
    svsetffr();
    let loaded = svld1_gather_u64index_f64(svptrue_b64(), storage.as_ptr() as *const f64, indices);
    assert_vector_matches_f64(
        loaded,
        svcvt_f64_s64_x(
            svptrue_b64(),
            svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
        ),
    );
}
#[simd_test(enable = "sve")]
unsafe fn test_svld1_gather_u64index_s64_with_svst1_scatter_u64index_s64() {
    let mut storage = [0 as i64; 160usize];
    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
    let indices = svindex_u64(0, 1);
    svst1_scatter_u64index_s64(svptrue_b64(), storage.as_mut_ptr(), indices, data);
    for (i, &val) in storage.iter().enumerate() {
        assert!(val == 0 as i64 || val == i as i64);
    }
    svsetffr();
    let loaded = svld1_gather_u64index_s64(svptrue_b64(), storage.as_ptr() as *const i64, indices);
    assert_vector_matches_i64(
        loaded,
        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
    );
}
#[simd_test(enable = "sve")]
unsafe fn test_svld1_gather_u64index_u64_with_svst1_scatter_u64index_u64() {
    let mut storage = [0 as u64; 160usize];
    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
    let indices = svindex_u64(0, 1);
    svst1_scatter_u64index_u64(svptrue_b64(), storage.as_mut_ptr(), indices, data);
    for (i, &val) in storage.iter().enumerate() {
        assert!(val == 0 as u64 || val == i as u64);
    }
    svsetffr();
    let loaded = svld1_gather_u64index_u64(svptrue_b64(), storage.as_ptr() as *const u64, indices);
    assert_vector_matches_u64(
        loaded,
        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
    );
}
#[simd_test(enable =
"sve")]535unsafe fn test_svld1_gather_s32offset_f32_with_svst1_scatter_s32offset_f32() {536 let mut storage = [0 as f32; 320usize];537 let data = svcvt_f32_s32_x(538 svptrue_b32(),539 svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),540 );541 let offsets = svindex_s32(0, 4u32.try_into().unwrap());542 svst1_scatter_s32offset_f32(svptrue_b32(), storage.as_mut_ptr(), offsets, data);543 for (i, &val) in storage.iter().enumerate() {544 assert!(val == 0 as f32 || val == i as f32);545 }546 svsetffr();547 let loaded = svld1_gather_s32offset_f32(svptrue_b32(), storage.as_ptr() as *const f32, offsets);548 assert_vector_matches_f32(549 loaded,550 svcvt_f32_s32_x(551 svptrue_b32(),552 svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),553 ),554 );555}556#[simd_test(enable = "sve")]557unsafe fn test_svld1_gather_s32offset_s32_with_svst1_scatter_s32offset_s32() {558 let mut storage = [0 as i32; 320usize];559 let data = svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap());560 let offsets = svindex_s32(0, 4u32.try_into().unwrap());561 svst1_scatter_s32offset_s32(svptrue_b32(), storage.as_mut_ptr(), offsets, data);562 for (i, &val) in storage.iter().enumerate() {563 assert!(val == 0 as i32 || val == i as i32);564 }565 svsetffr();566 let loaded = svld1_gather_s32offset_s32(svptrue_b32(), storage.as_ptr() as *const i32, offsets);567 assert_vector_matches_i32(568 loaded,569 svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),570 );571}572#[simd_test(enable = "sve")]573unsafe fn test_svld1_gather_s32offset_u32_with_svst1_scatter_s32offset_u32() {574 let mut storage = [0 as u32; 320usize];575 let data = svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap());576 let offsets = svindex_s32(0, 4u32.try_into().unwrap());577 svst1_scatter_s32offset_u32(svptrue_b32(), storage.as_mut_ptr(), offsets, data);578 for (i, &val) in storage.iter().enumerate() {579 assert!(val == 0 as u32 || val == i as u32);580 
    }
    svsetffr();
    let loaded = svld1_gather_s32offset_u32(svptrue_b32(), storage.as_ptr() as *const u32, offsets);
    assert_vector_matches_u32(
        loaded,
        svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
    );
}
// 64-bit byte-offset variants: offset ramp steps by 8 (sizeof element).
#[simd_test(enable = "sve")]
unsafe fn test_svld1_gather_s64offset_f64_with_svst1_scatter_s64offset_f64() {
    let mut storage = [0 as f64; 160usize];
    let data = svcvt_f64_s64_x(
        svptrue_b64(),
        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
    );
    let offsets = svindex_s64(0, 8u32.try_into().unwrap());
    svst1_scatter_s64offset_f64(svptrue_b64(), storage.as_mut_ptr(), offsets, data);
    for (i, &val) in storage.iter().enumerate() {
        assert!(val == 0 as f64 || val == i as f64);
    }
    svsetffr();
    let loaded = svld1_gather_s64offset_f64(svptrue_b64(), storage.as_ptr() as *const f64, offsets);
    assert_vector_matches_f64(
        loaded,
        svcvt_f64_s64_x(
            svptrue_b64(),
            svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
        ),
    );
}
#[simd_test(enable = "sve")]
unsafe fn test_svld1_gather_s64offset_s64_with_svst1_scatter_s64offset_s64() {
    let mut storage = [0 as i64; 160usize];
    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
    let offsets = svindex_s64(0, 8u32.try_into().unwrap());
    svst1_scatter_s64offset_s64(svptrue_b64(), storage.as_mut_ptr(), offsets, data);
    for (i, &val) in storage.iter().enumerate() {
        assert!(val == 0 as i64 || val == i as i64);
    }
    svsetffr();
    let loaded = svld1_gather_s64offset_s64(svptrue_b64(), storage.as_ptr() as *const i64, offsets);
    assert_vector_matches_i64(
        loaded,
        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
    );
}
#[simd_test(enable = "sve")]
unsafe fn test_svld1_gather_s64offset_u64_with_svst1_scatter_s64offset_u64() {
    let mut storage = [0 as u64; 160usize];
    let data = svindex_u64((0usize).try_into().unwrap(),
1usize.try_into().unwrap());
    let offsets = svindex_s64(0, 8u32.try_into().unwrap());
    svst1_scatter_s64offset_u64(svptrue_b64(), storage.as_mut_ptr(), offsets, data);
    for (i, &val) in storage.iter().enumerate() {
        assert!(val == 0 as u64 || val == i as u64);
    }
    svsetffr();
    let loaded = svld1_gather_s64offset_u64(svptrue_b64(), storage.as_ptr() as *const u64, offsets);
    assert_vector_matches_u64(
        loaded,
        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
    );
}
// Unsigned byte-offset variants (u32 offset vectors).
#[simd_test(enable = "sve")]
unsafe fn test_svld1_gather_u32offset_f32_with_svst1_scatter_u32offset_f32() {
    let mut storage = [0 as f32; 320usize];
    let data = svcvt_f32_s32_x(
        svptrue_b32(),
        svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
    );
    let offsets = svindex_u32(0, 4u32.try_into().unwrap());
    svst1_scatter_u32offset_f32(svptrue_b32(), storage.as_mut_ptr(), offsets, data);
    for (i, &val) in storage.iter().enumerate() {
        assert!(val == 0 as f32 || val == i as f32);
    }
    svsetffr();
    let loaded = svld1_gather_u32offset_f32(svptrue_b32(), storage.as_ptr() as *const f32, offsets);
    assert_vector_matches_f32(
        loaded,
        svcvt_f32_s32_x(
            svptrue_b32(),
            svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
        ),
    );
}
#[simd_test(enable = "sve")]
unsafe fn test_svld1_gather_u32offset_s32_with_svst1_scatter_u32offset_s32() {
    let mut storage = [0 as i32; 320usize];
    let data = svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
    let offsets = svindex_u32(0, 4u32.try_into().unwrap());
    svst1_scatter_u32offset_s32(svptrue_b32(), storage.as_mut_ptr(), offsets, data);
    for (i, &val) in storage.iter().enumerate() {
        assert!(val == 0 as i32 || val == i as i32);
    }
    svsetffr();
    let loaded = svld1_gather_u32offset_s32(svptrue_b32(), storage.as_ptr() as *const i32, offsets);
    assert_vector_matches_i32(
        loaded,
        svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
    );
}
#[simd_test(enable = "sve")]
unsafe fn test_svld1_gather_u32offset_u32_with_svst1_scatter_u32offset_u32() {
    let mut storage = [0 as u32; 320usize];
    let data = svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
    let offsets = svindex_u32(0, 4u32.try_into().unwrap());
    svst1_scatter_u32offset_u32(svptrue_b32(), storage.as_mut_ptr(), offsets, data);
    for (i, &val) in storage.iter().enumerate() {
        assert!(val == 0 as u32 || val == i as u32);
    }
    svsetffr();
    let loaded = svld1_gather_u32offset_u32(svptrue_b32(), storage.as_ptr() as *const u32, offsets);
    assert_vector_matches_u32(
        loaded,
        svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
    );
}
#[simd_test(enable = "sve")]
unsafe fn test_svld1_gather_u64offset_f64_with_svst1_scatter_u64offset_f64() {
    let mut storage = [0 as f64; 160usize];
    let data = svcvt_f64_s64_x(
        svptrue_b64(),
        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
    );
    let offsets = svindex_u64(0, 8u32.try_into().unwrap());
    svst1_scatter_u64offset_f64(svptrue_b64(), storage.as_mut_ptr(), offsets, data);
    for (i, &val) in storage.iter().enumerate() {
        assert!(val == 0 as f64 || val == i as f64);
    }
    svsetffr();
    let loaded = svld1_gather_u64offset_f64(svptrue_b64(), storage.as_ptr() as *const f64, offsets);
    assert_vector_matches_f64(
        loaded,
        svcvt_f64_s64_x(
            svptrue_b64(),
            svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
        ),
    );
}
#[simd_test(enable = "sve")]
unsafe fn test_svld1_gather_u64offset_s64_with_svst1_scatter_u64offset_s64() {
    let mut storage = [0 as i64; 160usize];
    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
    let offsets = svindex_u64(0, 8u32.try_into().unwrap());
    svst1_scatter_u64offset_s64(svptrue_b64(), storage.as_mut_ptr(), offsets,
data);
    for (i, &val) in storage.iter().enumerate() {
        assert!(val == 0 as i64 || val == i as i64);
    }
    svsetffr();
    let loaded = svld1_gather_u64offset_s64(svptrue_b64(), storage.as_ptr() as *const i64, offsets);
    assert_vector_matches_i64(
        loaded,
        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
    );
}
#[simd_test(enable = "sve")]
unsafe fn test_svld1_gather_u64offset_u64_with_svst1_scatter_u64offset_u64() {
    let mut storage = [0 as u64; 160usize];
    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
    let offsets = svindex_u64(0, 8u32.try_into().unwrap());
    svst1_scatter_u64offset_u64(svptrue_b64(), storage.as_mut_ptr(), offsets, data);
    for (i, &val) in storage.iter().enumerate() {
        assert!(val == 0 as u64 || val == i as u64);
    }
    svsetffr();
    let loaded = svld1_gather_u64offset_u64(svptrue_b64(), storage.as_ptr() as *const u64, offsets);
    assert_vector_matches_u64(
        loaded,
        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
    );
}
// "u64base" variants: the gather/scatter operand is a vector of absolute
// addresses, built here by adding per-lane byte offsets to the storage base.
#[simd_test(enable = "sve")]
unsafe fn test_svld1_gather_u64base_f64_with_svst1_scatter_u64base_f64() {
    let mut storage = [0 as f64; 160usize];
    let data = svcvt_f64_s64_x(
        svptrue_b64(),
        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
    );
    let bases = svdup_n_u64(storage.as_ptr() as u64);
    let offsets = svindex_u64(0, 8u32.try_into().unwrap());
    let bases = svadd_u64_x(svptrue_b64(), bases, offsets);
    svst1_scatter_u64base_f64(svptrue_b64(), bases, data);
    for (i, &val) in storage.iter().enumerate() {
        assert!(val == 0 as f64 || val == i as f64);
    }
    svsetffr();
    let loaded = svld1_gather_u64base_f64(svptrue_b64(), bases);
    assert_vector_matches_f64(
        loaded,
        svcvt_f64_s64_x(
            svptrue_b64(),
            svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
        ),
    );
}
#[simd_test(enable = "sve")]
unsafe fn
test_svld1_gather_u64base_s64_with_svst1_scatter_u64base_s64() {
    let mut storage = [0 as i64; 160usize];
    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
    let bases = svdup_n_u64(storage.as_ptr() as u64);
    let offsets = svindex_u64(0, 8u32.try_into().unwrap());
    let bases = svadd_u64_x(svptrue_b64(), bases, offsets);
    svst1_scatter_u64base_s64(svptrue_b64(), bases, data);
    for (i, &val) in storage.iter().enumerate() {
        assert!(val == 0 as i64 || val == i as i64);
    }
    svsetffr();
    let loaded = svld1_gather_u64base_s64(svptrue_b64(), bases);
    assert_vector_matches_i64(
        loaded,
        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
    );
}
#[simd_test(enable = "sve")]
unsafe fn test_svld1_gather_u64base_u64_with_svst1_scatter_u64base_u64() {
    let mut storage = [0 as u64; 160usize];
    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
    let bases = svdup_n_u64(storage.as_ptr() as u64);
    let offsets = svindex_u64(0, 8u32.try_into().unwrap());
    let bases = svadd_u64_x(svptrue_b64(), bases, offsets);
    svst1_scatter_u64base_u64(svptrue_b64(), bases, data);
    for (i, &val) in storage.iter().enumerate() {
        assert!(val == 0 as u64 || val == i as u64);
    }
    svsetffr();
    let loaded = svld1_gather_u64base_u64(svptrue_b64(), bases);
    assert_vector_matches_u64(
        loaded,
        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
    );
}
// "u32base_index" variants: per-lane 32-bit byte offsets in `bases` plus a
// scalar element index. The ramp starts at 1 and the scalar index adds the
// base address (divided by the element size) + 1, so lane i lands on storage
// element i + 1 — hence `data` also starts its ramp at 1.
#[simd_test(enable = "sve")]
unsafe fn test_svld1_gather_u32base_index_f32_with_svst1_scatter_u32base_index_f32() {
    let mut storage = [0 as f32; 320usize];
    let data = svcvt_f32_s32_x(
        svptrue_b32(),
        svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
    );
    let bases = svindex_u32(0, 4u32.try_into().unwrap());
    svst1_scatter_u32base_index_f32(
        svptrue_b32(),
        bases,
        storage.as_ptr() as i64 / (4u32 as i64) + 1,
        data,
    );
    for (i, &val) in
storage.iter().enumerate() {
        assert!(val == 0 as f32 || val == i as f32);
    }
    svsetffr();
    let loaded = svld1_gather_u32base_index_f32(
        svptrue_b32(),
        bases,
        storage.as_ptr() as i64 / (4u32 as i64) + 1,
    );
    assert_vector_matches_f32(
        loaded,
        svcvt_f32_s32_x(
            svptrue_b32(),
            svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
        ),
    );
}
#[simd_test(enable = "sve")]
unsafe fn test_svld1_gather_u32base_index_s32_with_svst1_scatter_u32base_index_s32() {
    let mut storage = [0 as i32; 320usize];
    let data = svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
    let bases = svindex_u32(0, 4u32.try_into().unwrap());
    svst1_scatter_u32base_index_s32(
        svptrue_b32(),
        bases,
        storage.as_ptr() as i64 / (4u32 as i64) + 1,
        data,
    );
    for (i, &val) in storage.iter().enumerate() {
        assert!(val == 0 as i32 || val == i as i32);
    }
    svsetffr();
    let loaded = svld1_gather_u32base_index_s32(
        svptrue_b32(),
        bases,
        storage.as_ptr() as i64 / (4u32 as i64) + 1,
    );
    assert_vector_matches_i32(
        loaded,
        svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
    );
}
#[simd_test(enable = "sve")]
unsafe fn test_svld1_gather_u32base_index_u32_with_svst1_scatter_u32base_index_u32() {
    let mut storage = [0 as u32; 320usize];
    let data = svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
    let bases = svindex_u32(0, 4u32.try_into().unwrap());
    svst1_scatter_u32base_index_u32(
        svptrue_b32(),
        bases,
        storage.as_ptr() as i64 / (4u32 as i64) + 1,
        data,
    );
    for (i, &val) in storage.iter().enumerate() {
        assert!(val == 0 as u32 || val == i as u32);
    }
    svsetffr();
    let loaded = svld1_gather_u32base_index_u32(
        svptrue_b32(),
        bases,
        storage.as_ptr() as i64 / (4u32 as i64) + 1,
    );
    assert_vector_matches_u32(
        loaded,
        svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
);890}891#[simd_test(enable = "sve")]892unsafe fn test_svld1_gather_u64base_index_f64_with_svst1_scatter_u64base_index_f64() {893 let mut storage = [0 as f64; 160usize];894 let data = svcvt_f64_s64_x(895 svptrue_b64(),896 svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),897 );898 let bases = svdup_n_u64(storage.as_ptr() as u64);899 let offsets = svindex_u64(0, 8u32.try_into().unwrap());900 let bases = svadd_u64_x(svptrue_b64(), bases, offsets);901 svst1_scatter_u64base_index_f64(svptrue_b64(), bases, 1.try_into().unwrap(), data);902 for (i, &val) in storage.iter().enumerate() {903 assert!(val == 0 as f64 || val == i as f64);904 }905 svsetffr();906 let loaded = svld1_gather_u64base_index_f64(svptrue_b64(), bases, 1.try_into().unwrap());907 assert_vector_matches_f64(908 loaded,909 svcvt_f64_s64_x(910 svptrue_b64(),911 svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),912 ),913 );914}915#[simd_test(enable = "sve")]916unsafe fn test_svld1_gather_u64base_index_s64_with_svst1_scatter_u64base_index_s64() {917 let mut storage = [0 as i64; 160usize];918 let data = svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap());919 let bases = svdup_n_u64(storage.as_ptr() as u64);920 let offsets = svindex_u64(0, 8u32.try_into().unwrap());921 let bases = svadd_u64_x(svptrue_b64(), bases, offsets);922 svst1_scatter_u64base_index_s64(svptrue_b64(), bases, 1.try_into().unwrap(), data);923 for (i, &val) in storage.iter().enumerate() {924 assert!(val == 0 as i64 || val == i as i64);925 }926 svsetffr();927 let loaded = svld1_gather_u64base_index_s64(svptrue_b64(), bases, 1.try_into().unwrap());928 assert_vector_matches_i64(929 loaded,930 svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),931 );932}933#[simd_test(enable = "sve")]934unsafe fn test_svld1_gather_u64base_index_u64_with_svst1_scatter_u64base_index_u64() {935 let mut storage = [0 as u64; 160usize];936 let data = svindex_u64((1usize).try_into().unwrap(), 
1usize.try_into().unwrap());937 let bases = svdup_n_u64(storage.as_ptr() as u64);938 let offsets = svindex_u64(0, 8u32.try_into().unwrap());939 let bases = svadd_u64_x(svptrue_b64(), bases, offsets);940 svst1_scatter_u64base_index_u64(svptrue_b64(), bases, 1.try_into().unwrap(), data);941 for (i, &val) in storage.iter().enumerate() {942 assert!(val == 0 as u64 || val == i as u64);943 }944 svsetffr();945 let loaded = svld1_gather_u64base_index_u64(svptrue_b64(), bases, 1.try_into().unwrap());946 assert_vector_matches_u64(947 loaded,948 svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),949 );950}951#[simd_test(enable = "sve")]952unsafe fn test_svld1_gather_u32base_offset_f32_with_svst1_scatter_u32base_offset_f32() {953 let mut storage = [0 as f32; 320usize];954 let data = svcvt_f32_s32_x(955 svptrue_b32(),956 svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),957 );958 let bases = svindex_u32(0, 4u32.try_into().unwrap());959 svst1_scatter_u32base_offset_f32(960 svptrue_b32(),961 bases,962 storage.as_ptr() as i64 + 4u32 as i64,963 data,964 );965 for (i, &val) in storage.iter().enumerate() {966 assert!(val == 0 as f32 || val == i as f32);967 }968 svsetffr();969 let loaded = svld1_gather_u32base_offset_f32(970 svptrue_b32(),971 bases,972 storage.as_ptr() as i64 + 4u32 as i64,973 );974 assert_vector_matches_f32(975 loaded,976 svcvt_f32_s32_x(977 svptrue_b32(),978 svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),979 ),980 );981}982#[simd_test(enable = "sve")]983unsafe fn test_svld1_gather_u32base_offset_s32_with_svst1_scatter_u32base_offset_s32() {984 let mut storage = [0 as i32; 320usize];985 let data = svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap());986 let bases = svindex_u32(0, 4u32.try_into().unwrap());987 svst1_scatter_u32base_offset_s32(988 svptrue_b32(),989 bases,990 storage.as_ptr() as i64 + 4u32 as i64,991 data,992 );993 for (i, &val) in storage.iter().enumerate() {994 assert!(val 
== 0 as i32 || val == i as i32);995 }996 svsetffr();997 let loaded = svld1_gather_u32base_offset_s32(998 svptrue_b32(),999 bases,1000 storage.as_ptr() as i64 + 4u32 as i64,1001 );1002 assert_vector_matches_i32(1003 loaded,1004 svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),1005 );1006}1007#[simd_test(enable = "sve")]1008unsafe fn test_svld1_gather_u32base_offset_u32_with_svst1_scatter_u32base_offset_u32() {1009 let mut storage = [0 as u32; 320usize];1010 let data = svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap());1011 let bases = svindex_u32(0, 4u32.try_into().unwrap());1012 svst1_scatter_u32base_offset_u32(1013 svptrue_b32(),1014 bases,1015 storage.as_ptr() as i64 + 4u32 as i64,1016 data,1017 );1018 for (i, &val) in storage.iter().enumerate() {1019 assert!(val == 0 as u32 || val == i as u32);1020 }1021 svsetffr();1022 let loaded = svld1_gather_u32base_offset_u32(1023 svptrue_b32(),1024 bases,1025 storage.as_ptr() as i64 + 4u32 as i64,1026 );1027 assert_vector_matches_u32(1028 loaded,1029 svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),1030 );1031}1032#[simd_test(enable = "sve")]1033unsafe fn test_svld1_gather_u64base_offset_f64_with_svst1_scatter_u64base_offset_f64() {1034 let mut storage = [0 as f64; 160usize];1035 let data = svcvt_f64_s64_x(1036 svptrue_b64(),1037 svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),1038 );1039 let bases = svdup_n_u64(storage.as_ptr() as u64);1040 let offsets = svindex_u64(0, 8u32.try_into().unwrap());1041 let bases = svadd_u64_x(svptrue_b64(), bases, offsets);1042 svst1_scatter_u64base_offset_f64(svptrue_b64(), bases, 8u32.try_into().unwrap(), data);1043 for (i, &val) in storage.iter().enumerate() {1044 assert!(val == 0 as f64 || val == i as f64);1045 }1046 svsetffr();1047 let loaded = svld1_gather_u64base_offset_f64(svptrue_b64(), bases, 8u32.try_into().unwrap());1048 assert_vector_matches_f64(1049 loaded,1050 svcvt_f64_s64_x(1051 
svptrue_b64(),1052 svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),1053 ),1054 );1055}1056#[simd_test(enable = "sve")]1057unsafe fn test_svld1_gather_u64base_offset_s64_with_svst1_scatter_u64base_offset_s64() {1058 let mut storage = [0 as i64; 160usize];1059 let data = svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap());1060 let bases = svdup_n_u64(storage.as_ptr() as u64);1061 let offsets = svindex_u64(0, 8u32.try_into().unwrap());1062 let bases = svadd_u64_x(svptrue_b64(), bases, offsets);1063 svst1_scatter_u64base_offset_s64(svptrue_b64(), bases, 8u32.try_into().unwrap(), data);1064 for (i, &val) in storage.iter().enumerate() {1065 assert!(val == 0 as i64 || val == i as i64);1066 }1067 svsetffr();1068 let loaded = svld1_gather_u64base_offset_s64(svptrue_b64(), bases, 8u32.try_into().unwrap());1069 assert_vector_matches_i64(1070 loaded,1071 svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),1072 );1073}1074#[simd_test(enable = "sve")]1075unsafe fn test_svld1_gather_u64base_offset_u64_with_svst1_scatter_u64base_offset_u64() {1076 let mut storage = [0 as u64; 160usize];1077 let data = svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap());1078 let bases = svdup_n_u64(storage.as_ptr() as u64);1079 let offsets = svindex_u64(0, 8u32.try_into().unwrap());1080 let bases = svadd_u64_x(svptrue_b64(), bases, offsets);1081 svst1_scatter_u64base_offset_u64(svptrue_b64(), bases, 8u32.try_into().unwrap(), data);1082 for (i, &val) in storage.iter().enumerate() {1083 assert!(val == 0 as u64 || val == i as u64);1084 }1085 svsetffr();1086 let loaded = svld1_gather_u64base_offset_u64(svptrue_b64(), bases, 8u32.try_into().unwrap());1087 assert_vector_matches_u64(1088 loaded,1089 svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),1090 );1091}1092#[simd_test(enable = "sve")]1093unsafe fn test_svld1_vnum_f32_with_svst1_vnum_f32() {1094 let len = svcntw() as usize;1095 let mut storage = [0 as 
f32; 320usize];1096 let data = svcvt_f32_s32_x(1097 svptrue_b32(),1098 svindex_s32(1099 (len + 0usize).try_into().unwrap(),1100 1usize.try_into().unwrap(),1101 ),1102 );1103 svst1_vnum_f32(svptrue_b32(), storage.as_mut_ptr(), 1, data);1104 for (i, &val) in storage.iter().enumerate() {1105 assert!(val == 0 as f32 || val == i as f32);1106 }1107 svsetffr();1108 let loaded = svld1_vnum_f32(svptrue_b32(), storage.as_ptr() as *const f32, 1);1109 assert_vector_matches_f32(1110 loaded,1111 svcvt_f32_s32_x(1112 svptrue_b32(),1113 svindex_s32(1114 (len + 0usize).try_into().unwrap(),1115 1usize.try_into().unwrap(),1116 ),1117 ),1118 );1119}1120#[simd_test(enable = "sve")]1121unsafe fn test_svld1_vnum_f64_with_svst1_vnum_f64() {1122 let len = svcntd() as usize;1123 let mut storage = [0 as f64; 160usize];1124 let data = svcvt_f64_s64_x(1125 svptrue_b64(),1126 svindex_s64(1127 (len + 0usize).try_into().unwrap(),1128 1usize.try_into().unwrap(),1129 ),1130 );1131 svst1_vnum_f64(svptrue_b64(), storage.as_mut_ptr(), 1, data);1132 for (i, &val) in storage.iter().enumerate() {1133 assert!(val == 0 as f64 || val == i as f64);1134 }1135 svsetffr();1136 let loaded = svld1_vnum_f64(svptrue_b64(), storage.as_ptr() as *const f64, 1);1137 assert_vector_matches_f64(1138 loaded,1139 svcvt_f64_s64_x(1140 svptrue_b64(),1141 svindex_s64(1142 (len + 0usize).try_into().unwrap(),1143 1usize.try_into().unwrap(),1144 ),1145 ),1146 );1147}1148#[simd_test(enable = "sve")]1149unsafe fn test_svld1_vnum_s8_with_svst1_vnum_s8() {1150 let len = svcntb() as usize;1151 let mut storage = [0 as i8; 1280usize];1152 let data = svindex_s8(1153 (len + 0usize).try_into().unwrap(),1154 1usize.try_into().unwrap(),1155 );1156 svst1_vnum_s8(svptrue_b8(), storage.as_mut_ptr(), 1, data);1157 for (i, &val) in storage.iter().enumerate() {1158 assert!(val == 0 as i8 || val == i as i8);1159 }1160 svsetffr();1161 let loaded = svld1_vnum_s8(svptrue_b8(), storage.as_ptr() as *const i8, 1);1162 assert_vector_matches_i8(1163 
loaded,1164 svindex_s8(1165 (len + 0usize).try_into().unwrap(),1166 1usize.try_into().unwrap(),1167 ),1168 );1169}1170#[simd_test(enable = "sve")]1171unsafe fn test_svld1_vnum_s16_with_svst1_vnum_s16() {1172 let len = svcnth() as usize;1173 let mut storage = [0 as i16; 640usize];1174 let data = svindex_s16(1175 (len + 0usize).try_into().unwrap(),1176 1usize.try_into().unwrap(),1177 );1178 svst1_vnum_s16(svptrue_b16(), storage.as_mut_ptr(), 1, data);1179 for (i, &val) in storage.iter().enumerate() {1180 assert!(val == 0 as i16 || val == i as i16);1181 }1182 svsetffr();1183 let loaded = svld1_vnum_s16(svptrue_b16(), storage.as_ptr() as *const i16, 1);1184 assert_vector_matches_i16(1185 loaded,1186 svindex_s16(1187 (len + 0usize).try_into().unwrap(),1188 1usize.try_into().unwrap(),1189 ),1190 );1191}1192#[simd_test(enable = "sve")]1193unsafe fn test_svld1_vnum_s32_with_svst1_vnum_s32() {1194 let len = svcntw() as usize;1195 let mut storage = [0 as i32; 320usize];1196 let data = svindex_s32(1197 (len + 0usize).try_into().unwrap(),1198 1usize.try_into().unwrap(),1199 );1200 svst1_vnum_s32(svptrue_b32(), storage.as_mut_ptr(), 1, data);1201 for (i, &val) in storage.iter().enumerate() {1202 assert!(val == 0 as i32 || val == i as i32);1203 }1204 svsetffr();1205 let loaded = svld1_vnum_s32(svptrue_b32(), storage.as_ptr() as *const i32, 1);1206 assert_vector_matches_i32(1207 loaded,1208 svindex_s32(1209 (len + 0usize).try_into().unwrap(),1210 1usize.try_into().unwrap(),1211 ),1212 );1213}1214#[simd_test(enable = "sve")]1215unsafe fn test_svld1_vnum_s64_with_svst1_vnum_s64() {1216 let len = svcntd() as usize;1217 let mut storage = [0 as i64; 160usize];1218 let data = svindex_s64(1219 (len + 0usize).try_into().unwrap(),1220 1usize.try_into().unwrap(),1221 );1222 svst1_vnum_s64(svptrue_b64(), storage.as_mut_ptr(), 1, data);1223 for (i, &val) in storage.iter().enumerate() {1224 assert!(val == 0 as i64 || val == i as i64);1225 }1226 svsetffr();1227 let loaded = 
svld1_vnum_s64(svptrue_b64(), storage.as_ptr() as *const i64, 1);1228 assert_vector_matches_i64(1229 loaded,1230 svindex_s64(1231 (len + 0usize).try_into().unwrap(),1232 1usize.try_into().unwrap(),1233 ),1234 );1235}1236#[simd_test(enable = "sve")]1237unsafe fn test_svld1_vnum_u8_with_svst1_vnum_u8() {1238 let len = svcntb() as usize;1239 let mut storage = [0 as u8; 1280usize];1240 let data = svindex_u8(1241 (len + 0usize).try_into().unwrap(),1242 1usize.try_into().unwrap(),1243 );1244 svst1_vnum_u8(svptrue_b8(), storage.as_mut_ptr(), 1, data);1245 for (i, &val) in storage.iter().enumerate() {1246 assert!(val == 0 as u8 || val == i as u8);1247 }1248 svsetffr();1249 let loaded = svld1_vnum_u8(svptrue_b8(), storage.as_ptr() as *const u8, 1);1250 assert_vector_matches_u8(1251 loaded,1252 svindex_u8(1253 (len + 0usize).try_into().unwrap(),1254 1usize.try_into().unwrap(),1255 ),1256 );1257}1258#[simd_test(enable = "sve")]1259unsafe fn test_svld1_vnum_u16_with_svst1_vnum_u16() {1260 let len = svcnth() as usize;1261 let mut storage = [0 as u16; 640usize];1262 let data = svindex_u16(1263 (len + 0usize).try_into().unwrap(),1264 1usize.try_into().unwrap(),1265 );1266 svst1_vnum_u16(svptrue_b16(), storage.as_mut_ptr(), 1, data);1267 for (i, &val) in storage.iter().enumerate() {1268 assert!(val == 0 as u16 || val == i as u16);1269 }1270 svsetffr();1271 let loaded = svld1_vnum_u16(svptrue_b16(), storage.as_ptr() as *const u16, 1);1272 assert_vector_matches_u16(1273 loaded,1274 svindex_u16(1275 (len + 0usize).try_into().unwrap(),1276 1usize.try_into().unwrap(),1277 ),1278 );1279}1280#[simd_test(enable = "sve")]1281unsafe fn test_svld1_vnum_u32_with_svst1_vnum_u32() {1282 let len = svcntw() as usize;1283 let mut storage = [0 as u32; 320usize];1284 let data = svindex_u32(1285 (len + 0usize).try_into().unwrap(),1286 1usize.try_into().unwrap(),1287 );1288 svst1_vnum_u32(svptrue_b32(), storage.as_mut_ptr(), 1, data);1289 for (i, &val) in storage.iter().enumerate() {1290 assert!(val 
== 0 as u32 || val == i as u32);1291 }1292 svsetffr();1293 let loaded = svld1_vnum_u32(svptrue_b32(), storage.as_ptr() as *const u32, 1);1294 assert_vector_matches_u32(1295 loaded,1296 svindex_u32(1297 (len + 0usize).try_into().unwrap(),1298 1usize.try_into().unwrap(),1299 ),1300 );1301}1302#[simd_test(enable = "sve")]1303unsafe fn test_svld1_vnum_u64_with_svst1_vnum_u64() {1304 let len = svcntd() as usize;1305 let mut storage = [0 as u64; 160usize];1306 let data = svindex_u64(1307 (len + 0usize).try_into().unwrap(),1308 1usize.try_into().unwrap(),1309 );1310 svst1_vnum_u64(svptrue_b64(), storage.as_mut_ptr(), 1, data);1311 for (i, &val) in storage.iter().enumerate() {1312 assert!(val == 0 as u64 || val == i as u64);1313 }1314 svsetffr();1315 let loaded = svld1_vnum_u64(svptrue_b64(), storage.as_ptr() as *const u64, 1);1316 assert_vector_matches_u64(1317 loaded,1318 svindex_u64(1319 (len + 0usize).try_into().unwrap(),1320 1usize.try_into().unwrap(),1321 ),1322 );1323}1324#[simd_test(enable = "sve,f64mm")]1325unsafe fn test_svld1ro_f32() {1326 if svcntb() < 32 {1327 println!("Skipping test_svld1ro_f32 due to SVE vector length");1328 return;1329 }1330 svsetffr();1331 let loaded = svld1ro_f32(svptrue_b32(), F32_DATA.as_ptr());1332 assert_vector_matches_f32(1333 loaded,1334 svtrn1q_f32(1335 svdupq_n_f32(0usize as f32, 1usize as f32, 2usize as f32, 3usize as f32),1336 svdupq_n_f32(4usize as f32, 5usize as f32, 6usize as f32, 7usize as f32),1337 ),1338 );1339}1340#[simd_test(enable = "sve,f64mm")]1341unsafe fn test_svld1ro_f64() {1342 if svcntb() < 32 {1343 println!("Skipping test_svld1ro_f64 due to SVE vector length");1344 return;1345 }1346 svsetffr();1347 let loaded = svld1ro_f64(svptrue_b64(), F64_DATA.as_ptr());1348 assert_vector_matches_f64(1349 loaded,1350 svtrn1q_f64(1351 svdupq_n_f64(0usize as f64, 1usize as f64),1352 svdupq_n_f64(2usize as f64, 3usize as f64),1353 ),1354 );1355}1356#[simd_test(enable = "sve,f64mm")]1357unsafe fn test_svld1ro_s8() {1358 if 
svcntb() < 32 {1359 println!("Skipping test_svld1ro_s8 due to SVE vector length");1360 return;1361 }1362 svsetffr();1363 let loaded = svld1ro_s8(svptrue_b8(), I8_DATA.as_ptr());1364 assert_vector_matches_i8(1365 loaded,1366 svtrn1q_s8(1367 svdupq_n_s8(1368 0usize as i8,1369 1usize as i8,1370 2usize as i8,1371 3usize as i8,1372 4usize as i8,1373 5usize as i8,1374 6usize as i8,1375 7usize as i8,1376 8usize as i8,1377 9usize as i8,1378 10usize as i8,1379 11usize as i8,1380 12usize as i8,1381 13usize as i8,1382 14usize as i8,1383 15usize as i8,1384 ),1385 svdupq_n_s8(1386 16usize as i8,1387 17usize as i8,1388 18usize as i8,1389 19usize as i8,1390 20usize as i8,1391 21usize as i8,1392 22usize as i8,1393 23usize as i8,1394 24usize as i8,1395 25usize as i8,1396 26usize as i8,1397 27usize as i8,1398 28usize as i8,1399 29usize as i8,1400 30usize as i8,1401 31usize as i8,1402 ),1403 ),1404 );1405}1406#[simd_test(enable = "sve,f64mm")]1407unsafe fn test_svld1ro_s16() {1408 if svcntb() < 32 {1409 println!("Skipping test_svld1ro_s16 due to SVE vector length");1410 return;1411 }1412 svsetffr();1413 let loaded = svld1ro_s16(svptrue_b16(), I16_DATA.as_ptr());1414 assert_vector_matches_i16(1415 loaded,1416 svtrn1q_s16(1417 svdupq_n_s16(1418 0usize as i16,1419 1usize as i16,1420 2usize as i16,1421 3usize as i16,1422 4usize as i16,1423 5usize as i16,1424 6usize as i16,1425 7usize as i16,1426 ),1427 svdupq_n_s16(1428 8usize as i16,1429 9usize as i16,1430 10usize as i16,1431 11usize as i16,1432 12usize as i16,1433 13usize as i16,1434 14usize as i16,1435 15usize as i16,1436 ),1437 ),1438 );1439}1440#[simd_test(enable = "sve,f64mm")]1441unsafe fn test_svld1ro_s32() {1442 if svcntb() < 32 {1443 println!("Skipping test_svld1ro_s32 due to SVE vector length");1444 return;1445 }1446 svsetffr();1447 let loaded = svld1ro_s32(svptrue_b32(), I32_DATA.as_ptr());1448 assert_vector_matches_i32(1449 loaded,1450 svtrn1q_s32(1451 svdupq_n_s32(0usize as i32, 1usize as i32, 2usize as i32, 3usize as 
i32),1452 svdupq_n_s32(4usize as i32, 5usize as i32, 6usize as i32, 7usize as i32),1453 ),1454 );1455}1456#[simd_test(enable = "sve,f64mm")]1457unsafe fn test_svld1ro_s64() {1458 if svcntb() < 32 {1459 println!("Skipping test_svld1ro_s64 due to SVE vector length");1460 return;1461 }1462 svsetffr();1463 let loaded = svld1ro_s64(svptrue_b64(), I64_DATA.as_ptr());1464 assert_vector_matches_i64(1465 loaded,1466 svtrn1q_s64(1467 svdupq_n_s64(0usize as i64, 1usize as i64),1468 svdupq_n_s64(2usize as i64, 3usize as i64),1469 ),1470 );1471}1472#[simd_test(enable = "sve,f64mm")]1473unsafe fn test_svld1ro_u8() {1474 if svcntb() < 32 {1475 println!("Skipping test_svld1ro_u8 due to SVE vector length");1476 return;1477 }1478 svsetffr();1479 let loaded = svld1ro_u8(svptrue_b8(), U8_DATA.as_ptr());1480 assert_vector_matches_u8(1481 loaded,1482 svtrn1q_u8(1483 svdupq_n_u8(1484 0usize as u8,1485 1usize as u8,1486 2usize as u8,1487 3usize as u8,1488 4usize as u8,1489 5usize as u8,1490 6usize as u8,1491 7usize as u8,1492 8usize as u8,1493 9usize as u8,1494 10usize as u8,1495 11usize as u8,1496 12usize as u8,1497 13usize as u8,1498 14usize as u8,1499 15usize as u8,1500 ),1501 svdupq_n_u8(1502 16usize as u8,1503 17usize as u8,1504 18usize as u8,1505 19usize as u8,1506 20usize as u8,1507 21usize as u8,1508 22usize as u8,1509 23usize as u8,1510 24usize as u8,1511 25usize as u8,1512 26usize as u8,1513 27usize as u8,1514 28usize as u8,1515 29usize as u8,1516 30usize as u8,1517 31usize as u8,1518 ),1519 ),1520 );1521}1522#[simd_test(enable = "sve,f64mm")]1523unsafe fn test_svld1ro_u16() {1524 if svcntb() < 32 {1525 println!("Skipping test_svld1ro_u16 due to SVE vector length");1526 return;1527 }1528 svsetffr();1529 let loaded = svld1ro_u16(svptrue_b16(), U16_DATA.as_ptr());1530 assert_vector_matches_u16(1531 loaded,1532 svtrn1q_u16(1533 svdupq_n_u16(1534 0usize as u16,1535 1usize as u16,1536 2usize as u16,1537 3usize as u16,1538 4usize as u16,1539 5usize as u16,1540 6usize as u16,1541 
7usize as u16,1542 ),1543 svdupq_n_u16(1544 8usize as u16,1545 9usize as u16,1546 10usize as u16,1547 11usize as u16,1548 12usize as u16,1549 13usize as u16,1550 14usize as u16,1551 15usize as u16,1552 ),1553 ),1554 );1555}1556#[simd_test(enable = "sve,f64mm")]1557unsafe fn test_svld1ro_u32() {1558 if svcntb() < 32 {1559 println!("Skipping test_svld1ro_u32 due to SVE vector length");1560 return;1561 }1562 svsetffr();1563 let loaded = svld1ro_u32(svptrue_b32(), U32_DATA.as_ptr());1564 assert_vector_matches_u32(1565 loaded,1566 svtrn1q_u32(1567 svdupq_n_u32(0usize as u32, 1usize as u32, 2usize as u32, 3usize as u32),1568 svdupq_n_u32(4usize as u32, 5usize as u32, 6usize as u32, 7usize as u32),1569 ),1570 );1571}1572#[simd_test(enable = "sve,f64mm")]1573unsafe fn test_svld1ro_u64() {1574 if svcntb() < 32 {1575 println!("Skipping test_svld1ro_u64 due to SVE vector length");1576 return;1577 }1578 svsetffr();1579 let loaded = svld1ro_u64(svptrue_b64(), U64_DATA.as_ptr());1580 assert_vector_matches_u64(1581 loaded,1582 svtrn1q_u64(1583 svdupq_n_u64(0usize as u64, 1usize as u64),1584 svdupq_n_u64(2usize as u64, 3usize as u64),1585 ),1586 );1587}1588#[simd_test(enable = "sve")]1589unsafe fn test_svld1rq_f32() {1590 svsetffr();1591 let loaded = svld1rq_f32(svptrue_b32(), F32_DATA.as_ptr());1592 assert_vector_matches_f32(1593 loaded,1594 svdupq_n_f32(0usize as f32, 1usize as f32, 2usize as f32, 3usize as f32),1595 );1596}1597#[simd_test(enable = "sve")]1598unsafe fn test_svld1rq_f64() {1599 svsetffr();1600 let loaded = svld1rq_f64(svptrue_b64(), F64_DATA.as_ptr());1601 assert_vector_matches_f64(loaded, svdupq_n_f64(0usize as f64, 1usize as f64));1602}1603#[simd_test(enable = "sve")]1604unsafe fn test_svld1rq_s8() {1605 svsetffr();1606 let loaded = svld1rq_s8(svptrue_b8(), I8_DATA.as_ptr());1607 assert_vector_matches_i8(1608 loaded,1609 svdupq_n_s8(1610 0usize as i8,1611 1usize as i8,1612 2usize as i8,1613 3usize as i8,1614 4usize as i8,1615 5usize as i8,1616 6usize as i8,1617 
7usize as i8,1618 8usize as i8,1619 9usize as i8,1620 10usize as i8,1621 11usize as i8,1622 12usize as i8,1623 13usize as i8,1624 14usize as i8,1625 15usize as i8,1626 ),1627 );1628}1629#[simd_test(enable = "sve")]1630unsafe fn test_svld1rq_s16() {1631 svsetffr();1632 let loaded = svld1rq_s16(svptrue_b16(), I16_DATA.as_ptr());1633 assert_vector_matches_i16(1634 loaded,1635 svdupq_n_s16(1636 0usize as i16,1637 1usize as i16,1638 2usize as i16,1639 3usize as i16,1640 4usize as i16,1641 5usize as i16,1642 6usize as i16,1643 7usize as i16,1644 ),1645 );1646}1647#[simd_test(enable = "sve")]1648unsafe fn test_svld1rq_s32() {1649 svsetffr();1650 let loaded = svld1rq_s32(svptrue_b32(), I32_DATA.as_ptr());1651 assert_vector_matches_i32(1652 loaded,1653 svdupq_n_s32(0usize as i32, 1usize as i32, 2usize as i32, 3usize as i32),1654 );1655}1656#[simd_test(enable = "sve")]1657unsafe fn test_svld1rq_s64() {1658 svsetffr();1659 let loaded = svld1rq_s64(svptrue_b64(), I64_DATA.as_ptr());1660 assert_vector_matches_i64(loaded, svdupq_n_s64(0usize as i64, 1usize as i64));1661}1662#[simd_test(enable = "sve")]1663unsafe fn test_svld1rq_u8() {1664 svsetffr();1665 let loaded = svld1rq_u8(svptrue_b8(), U8_DATA.as_ptr());1666 assert_vector_matches_u8(1667 loaded,1668 svdupq_n_u8(1669 0usize as u8,1670 1usize as u8,1671 2usize as u8,1672 3usize as u8,1673 4usize as u8,1674 5usize as u8,1675 6usize as u8,1676 7usize as u8,1677 8usize as u8,1678 9usize as u8,1679 10usize as u8,1680 11usize as u8,1681 12usize as u8,1682 13usize as u8,1683 14usize as u8,1684 15usize as u8,1685 ),1686 );1687}1688#[simd_test(enable = "sve")]1689unsafe fn test_svld1rq_u16() {1690 svsetffr();1691 let loaded = svld1rq_u16(svptrue_b16(), U16_DATA.as_ptr());1692 assert_vector_matches_u16(1693 loaded,1694 svdupq_n_u16(1695 0usize as u16,1696 1usize as u16,1697 2usize as u16,1698 3usize as u16,1699 4usize as u16,1700 5usize as u16,1701 6usize as u16,1702 7usize as u16,1703 ),1704 );1705}1706#[simd_test(enable = 
"sve")]1707unsafe fn test_svld1rq_u32() {1708 svsetffr();1709 let loaded = svld1rq_u32(svptrue_b32(), U32_DATA.as_ptr());1710 assert_vector_matches_u32(1711 loaded,1712 svdupq_n_u32(0usize as u32, 1usize as u32, 2usize as u32, 3usize as u32),1713 );1714}1715#[simd_test(enable = "sve")]1716unsafe fn test_svld1rq_u64() {1717 svsetffr();1718 let loaded = svld1rq_u64(svptrue_b64(), U64_DATA.as_ptr());1719 assert_vector_matches_u64(loaded, svdupq_n_u64(0usize as u64, 1usize as u64));1720}1721#[simd_test(enable = "sve")]1722unsafe fn test_svld1sb_gather_s32offset_s32_with_svst1b_scatter_s32offset_s32() {1723 let mut storage = [0 as i8; 1280usize];1724 let data = svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap());1725 let offsets = svindex_s32(0, 1u32.try_into().unwrap());1726 svst1b_scatter_s32offset_s32(svptrue_b8(), storage.as_mut_ptr(), offsets, data);1727 for (i, &val) in storage.iter().enumerate() {1728 assert!(val == 0 as i8 || val == i as i8);1729 }1730 svsetffr();1731 let loaded = svld1sb_gather_s32offset_s32(svptrue_b8(), storage.as_ptr() as *const i8, offsets);1732 assert_vector_matches_i32(1733 loaded,1734 svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),1735 );1736}1737#[simd_test(enable = "sve")]1738unsafe fn test_svld1sh_gather_s32offset_s32_with_svst1h_scatter_s32offset_s32() {1739 let mut storage = [0 as i16; 640usize];1740 let data = svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap());1741 let offsets = svindex_s32(0, 2u32.try_into().unwrap());1742 svst1h_scatter_s32offset_s32(svptrue_b16(), storage.as_mut_ptr(), offsets, data);1743 for (i, &val) in storage.iter().enumerate() {1744 assert!(val == 0 as i16 || val == i as i16);1745 }1746 svsetffr();1747 let loaded =1748 svld1sh_gather_s32offset_s32(svptrue_b16(), storage.as_ptr() as *const i16, offsets);1749 assert_vector_matches_i32(1750 loaded,1751 svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),1752 
);1753}1754#[simd_test(enable = "sve")]1755unsafe fn test_svld1sb_gather_s32offset_u32_with_svst1b_scatter_s32offset_u32() {1756 let mut storage = [0 as u8; 1280usize];1757 let data = svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap());1758 let offsets = svindex_s32(0, 1u32.try_into().unwrap());1759 svst1b_scatter_s32offset_u32(svptrue_b8(), storage.as_mut_ptr(), offsets, data);1760 for (i, &val) in storage.iter().enumerate() {1761 assert!(val == 0 as u8 || val == i as u8);1762 }1763 svsetffr();1764 let loaded = svld1sb_gather_s32offset_u32(svptrue_b8(), storage.as_ptr() as *const i8, offsets);1765 assert_vector_matches_u32(1766 loaded,1767 svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),1768 );1769}1770#[simd_test(enable = "sve")]1771unsafe fn test_svld1sh_gather_s32offset_u32_with_svst1h_scatter_s32offset_u32() {1772 let mut storage = [0 as u16; 640usize];1773 let data = svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap());1774 let offsets = svindex_s32(0, 2u32.try_into().unwrap());1775 svst1h_scatter_s32offset_u32(svptrue_b16(), storage.as_mut_ptr(), offsets, data);1776 for (i, &val) in storage.iter().enumerate() {1777 assert!(val == 0 as u16 || val == i as u16);1778 }1779 svsetffr();1780 let loaded =1781 svld1sh_gather_s32offset_u32(svptrue_b16(), storage.as_ptr() as *const i16, offsets);1782 assert_vector_matches_u32(1783 loaded,1784 svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),1785 );1786}1787#[simd_test(enable = "sve")]1788unsafe fn test_svld1sb_gather_s64offset_s64_with_svst1b_scatter_s64offset_s64() {1789 let mut storage = [0 as i8; 1280usize];1790 let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());1791 let offsets = svindex_s64(0, 1u32.try_into().unwrap());1792 svst1b_scatter_s64offset_s64(svptrue_b8(), storage.as_mut_ptr(), offsets, data);1793 for (i, &val) in storage.iter().enumerate() {1794 assert!(val == 0 as i8 || val == i as i8);1795 }1796 
svsetffr();1797 let loaded = svld1sb_gather_s64offset_s64(svptrue_b8(), storage.as_ptr() as *const i8, offsets);1798 assert_vector_matches_i64(1799 loaded,1800 svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),1801 );1802}1803#[simd_test(enable = "sve")]1804unsafe fn test_svld1sh_gather_s64offset_s64_with_svst1h_scatter_s64offset_s64() {1805 let mut storage = [0 as i16; 640usize];1806 let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());1807 let offsets = svindex_s64(0, 2u32.try_into().unwrap());1808 svst1h_scatter_s64offset_s64(svptrue_b16(), storage.as_mut_ptr(), offsets, data);1809 for (i, &val) in storage.iter().enumerate() {1810 assert!(val == 0 as i16 || val == i as i16);1811 }1812 svsetffr();1813 let loaded =1814 svld1sh_gather_s64offset_s64(svptrue_b16(), storage.as_ptr() as *const i16, offsets);1815 assert_vector_matches_i64(1816 loaded,1817 svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),1818 );1819}1820#[simd_test(enable = "sve")]1821unsafe fn test_svld1sw_gather_s64offset_s64_with_svst1w_scatter_s64offset_s64() {1822 let mut storage = [0 as i32; 320usize];1823 let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());1824 let offsets = svindex_s64(0, 4u32.try_into().unwrap());1825 svst1w_scatter_s64offset_s64(svptrue_b32(), storage.as_mut_ptr(), offsets, data);1826 for (i, &val) in storage.iter().enumerate() {1827 assert!(val == 0 as i32 || val == i as i32);1828 }1829 svsetffr();1830 let loaded =1831 svld1sw_gather_s64offset_s64(svptrue_b32(), storage.as_ptr() as *const i32, offsets);1832 assert_vector_matches_i64(1833 loaded,1834 svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),1835 );1836}1837#[simd_test(enable = "sve")]1838unsafe fn test_svld1sb_gather_s64offset_u64_with_svst1b_scatter_s64offset_u64() {1839 let mut storage = [0 as u8; 1280usize];1840 let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());1841 let 
offsets = svindex_s64(0, 1u32.try_into().unwrap());1842 svst1b_scatter_s64offset_u64(svptrue_b8(), storage.as_mut_ptr(), offsets, data);1843 for (i, &val) in storage.iter().enumerate() {1844 assert!(val == 0 as u8 || val == i as u8);1845 }1846 svsetffr();1847 let loaded = svld1sb_gather_s64offset_u64(svptrue_b8(), storage.as_ptr() as *const i8, offsets);1848 assert_vector_matches_u64(1849 loaded,1850 svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),1851 );1852}1853#[simd_test(enable = "sve")]1854unsafe fn test_svld1sh_gather_s64offset_u64_with_svst1h_scatter_s64offset_u64() {1855 let mut storage = [0 as u16; 640usize];1856 let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());1857 let offsets = svindex_s64(0, 2u32.try_into().unwrap());1858 svst1h_scatter_s64offset_u64(svptrue_b16(), storage.as_mut_ptr(), offsets, data);1859 for (i, &val) in storage.iter().enumerate() {1860 assert!(val == 0 as u16 || val == i as u16);1861 }1862 svsetffr();1863 let loaded =1864 svld1sh_gather_s64offset_u64(svptrue_b16(), storage.as_ptr() as *const i16, offsets);1865 assert_vector_matches_u64(1866 loaded,1867 svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),1868 );1869}1870#[simd_test(enable = "sve")]1871unsafe fn test_svld1sw_gather_s64offset_u64_with_svst1w_scatter_s64offset_u64() {1872 let mut storage = [0 as u32; 320usize];1873 let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());1874 let offsets = svindex_s64(0, 4u32.try_into().unwrap());1875 svst1w_scatter_s64offset_u64(svptrue_b32(), storage.as_mut_ptr(), offsets, data);1876 for (i, &val) in storage.iter().enumerate() {1877 assert!(val == 0 as u32 || val == i as u32);1878 }1879 svsetffr();1880 let loaded =1881 svld1sw_gather_s64offset_u64(svptrue_b32(), storage.as_ptr() as *const i32, offsets);1882 assert_vector_matches_u64(1883 loaded,1884 svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),1885 
);
}
// NOTE(review): all tests below follow one generated template:
//   1. scatter an index vector (lane i holds value i) into a zeroed `storage`
//      array using per-lane *byte* offsets;
//   2. linearly scan `storage` to check every element is either untouched (0)
//      or equal to its own index;
//   3. clear the first-fault register with `svsetffr()`, gather the values
//      back with the matching sign-extending load, and compare against a
//      freshly built index vector.
// The offset stride always equals the stored element size in bytes
// (1 for i8/u8, 2 for i16/u16, 4 for i32), so lane i lands at element i.

// Round-trip i32 lanes through i8 storage: truncating byte scatter, then
// sign-extending byte gather with u32 byte offsets.
#[simd_test(enable = "sve")]
unsafe fn test_svld1sb_gather_u32offset_s32_with_svst1b_scatter_u32offset_s32() {
    let mut storage = [0 as i8; 1280usize];
    let data = svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
    let offsets = svindex_u32(0, 1u32.try_into().unwrap());
    svst1b_scatter_u32offset_s32(svptrue_b8(), storage.as_mut_ptr(), offsets, data);
    // Untouched slots stay 0; written slots hold their own index.
    for (i, &val) in storage.iter().enumerate() {
        assert!(val == 0 as i8 || val == i as i8);
    }
    svsetffr();
    let loaded = svld1sb_gather_u32offset_s32(svptrue_b8(), storage.as_ptr() as *const i8, offsets);
    assert_vector_matches_i32(
        loaded,
        svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
    );
}
// Round-trip i32 lanes through i16 storage; offset stride 2 = size_of::<i16>().
#[simd_test(enable = "sve")]
unsafe fn test_svld1sh_gather_u32offset_s32_with_svst1h_scatter_u32offset_s32() {
    let mut storage = [0 as i16; 640usize];
    let data = svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
    let offsets = svindex_u32(0, 2u32.try_into().unwrap());
    svst1h_scatter_u32offset_s32(svptrue_b16(), storage.as_mut_ptr(), offsets, data);
    for (i, &val) in storage.iter().enumerate() {
        assert!(val == 0 as i16 || val == i as i16);
    }
    svsetffr();
    let loaded =
        svld1sh_gather_u32offset_s32(svptrue_b16(), storage.as_ptr() as *const i16, offsets);
    assert_vector_matches_i32(
        loaded,
        svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
    );
}
// Unsigned u32-lane variant of the i8-storage round trip. Note the gather
// still takes a *const i8 base (sign-extending load) even though the element
// container here is u8 — the cast below is part of the generated contract.
#[simd_test(enable = "sve")]
unsafe fn test_svld1sb_gather_u32offset_u32_with_svst1b_scatter_u32offset_u32() {
    let mut storage = [0 as u8; 1280usize];
    let data = svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
    let offsets = svindex_u32(0, 1u32.try_into().unwrap());
    svst1b_scatter_u32offset_u32(svptrue_b8(), storage.as_mut_ptr(), offsets, data);
    for (i, &val) in storage.iter().enumerate() {
        assert!(val == 0 as u8 || val == i as u8);
    }
    svsetffr();
    let loaded = svld1sb_gather_u32offset_u32(svptrue_b8(), storage.as_ptr() as *const i8, offsets);
    assert_vector_matches_u32(
        loaded,
        svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
    );
}
// Unsigned u32-lane variant of the i16-storage round trip (stride 2 bytes).
#[simd_test(enable = "sve")]
unsafe fn test_svld1sh_gather_u32offset_u32_with_svst1h_scatter_u32offset_u32() {
    let mut storage = [0 as u16; 640usize];
    let data = svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
    let offsets = svindex_u32(0, 2u32.try_into().unwrap());
    svst1h_scatter_u32offset_u32(svptrue_b16(), storage.as_mut_ptr(), offsets, data);
    for (i, &val) in storage.iter().enumerate() {
        assert!(val == 0 as u16 || val == i as u16);
    }
    svsetffr();
    let loaded =
        svld1sh_gather_u32offset_u32(svptrue_b16(), storage.as_ptr() as *const i16, offsets);
    assert_vector_matches_u32(
        loaded,
        svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
    );
}
// 64-bit-lane variants: same template, but offsets are a u64 vector.
#[simd_test(enable = "sve")]
unsafe fn test_svld1sb_gather_u64offset_s64_with_svst1b_scatter_u64offset_s64() {
    let mut storage = [0 as i8; 1280usize];
    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
    let offsets = svindex_u64(0, 1u32.try_into().unwrap());
    svst1b_scatter_u64offset_s64(svptrue_b8(), storage.as_mut_ptr(), offsets, data);
    for (i, &val) in storage.iter().enumerate() {
        assert!(val == 0 as i8 || val == i as i8);
    }
    svsetffr();
    let loaded = svld1sb_gather_u64offset_s64(svptrue_b8(), storage.as_ptr() as *const i8, offsets);
    assert_vector_matches_i64(
        loaded,
        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
    );
}
// i64 lanes through i16 storage; sign-extending halfword gather.
#[simd_test(enable = "sve")]
unsafe fn test_svld1sh_gather_u64offset_s64_with_svst1h_scatter_u64offset_s64() {
    let mut storage = [0 as i16; 640usize];
    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
    let offsets = svindex_u64(0, 2u32.try_into().unwrap());
    svst1h_scatter_u64offset_s64(svptrue_b16(), storage.as_mut_ptr(), offsets, data);
    for (i, &val) in storage.iter().enumerate() {
        assert!(val == 0 as i16 || val == i as i16);
    }
    svsetffr();
    let loaded =
        svld1sh_gather_u64offset_s64(svptrue_b16(), storage.as_ptr() as *const i16, offsets);
    assert_vector_matches_i64(
        loaded,
        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
    );
}
// i64 lanes through i32 storage; offset stride 4 = size_of::<i32>().
// (This function is truncated at the end of the visible chunk; its closing
// tokens continue past this view.)
#[simd_test(enable = "sve")]
unsafe fn test_svld1sw_gather_u64offset_s64_with_svst1w_scatter_u64offset_s64() {
    let mut storage = [0 as i32; 320usize];
    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
    let offsets = svindex_u64(0, 4u32.try_into().unwrap());
    svst1w_scatter_u64offset_s64(svptrue_b32(), storage.as_mut_ptr(), offsets, data);
    for (i, &val) in storage.iter().enumerate() {
        assert!(val == 0 as i32 || val == i as i32);
    }
    svsetffr();
    let loaded =
        svld1sw_gather_u64offset_s64(svptrue_b32(), storage.as_ptr() as *const i32, offsets);
    assert_vector_matches_i64(
        loaded,
        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
Findings
✓ No findings reported for this file.