+            /*
+             * If !HAVE_ATOMIC128_RO, then atomic16_set may be implemented
+             * with a 16-byte compare and store loop, which is expensive, so
+             * prefer two 8-byte stores in this case.
+             */
+            if (HAVE_ATOMIC128_RO && (is_load || HAVE_ATOMIC128_RW)
+                && (test % 16 == 0)) {
+                for (; reg_start < evl; reg_start += 16 >> log2_esz, host += 16) {
+                    vext_ldst_atom_16_host(vd, reg_start * esz, host, is_load);
+                }
+                return;
+            }
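+            /*
+             * Otherwise fall back to the widest narrower access (8, 4 or
+             * 2 bytes) that still divides test evenly.
+             */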
+            if (test % 8 == 0) {
+                for (; reg_start < evl; reg_start += 8 >> log2_esz, host += 8) {
+                    vext_ldst_atom_8_host(vd, reg_start * esz, host, is_load);
+                }
+                return;
+            }
+            if (test % 4 == 0) {
+                for (; reg_start < evl; reg_start += 4 >> log2_esz, host += 4) {
+                    vext_ldst_atom_4_host(vd, reg_start * esz, host, is_load);
+                }
+                return;
+            }
+            if (test % 2 == 0) {
+                for (; reg_start < evl; reg_start += 2 >> log2_esz, host += 2) {
+                    vext_ldst_atom_2_host(vd, reg_start * esz, host, is_load);
+                }
+                return;
+            }
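+            /* test is expected to be even, so one of the cases above must match. */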
+            g_assert_not_reached();
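For reference, a rough self-contained sketch (not part of the patch) of the trade-off the comment above describes: without native 16-byte atomic stores, a generic atomic16_set has to loop on a 16-byte compare-and-swap until it succeeds, whereas two 8-byte stores complete as plain stores. The helper names set16_cas_loop and set2x8 are invented for illustration and assume GCC-style __atomic builtins.

    #include <stdint.h>

    typedef unsigned __int128 u128;

    /* Emulated 16-byte store: retry a 16-byte compare-and-swap until it wins. */
    static void set16_cas_loop(u128 *ptr, u128 val)
    {
        u128 old = __atomic_load_n(ptr, __ATOMIC_RELAXED);

        while (!__atomic_compare_exchange_n(ptr, &old, val, true,
                                            __ATOMIC_RELAXED, __ATOMIC_RELAXED)) {
            /* old has been refreshed with the current value; try again. */
        }
    }

    /*
     * Alternative: two plain 8-byte stores, each individually atomic on a
     * 64-bit host, but the 16-byte pair as a whole is not a single atomic store.
     */
    static void set2x8(uint64_t *ptr, uint64_t lo, uint64_t hi)
    {
        __atomic_store_n(&ptr[0], lo, __ATOMIC_RELAXED);
        __atomic_store_n(&ptr[1], hi, __ATOMIC_RELAXED);
    }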