summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorKaz Kylheku <kaz@kylheku.com>2021-10-20 22:20:18 -0700
committerKaz Kylheku <kaz@kylheku.com>2021-10-20 22:20:18 -0700
commitc29c79e0cf015ece40f8e23b5acfb452fcf30f04 (patch)
tree3e63d8445425dda874a3491a97eaa2234ba40d39
parent6ccf4e55ee05c998a0168cc58af728b046df5755 (diff)
downloadtxr-c29c79e0cf015ece40f8e23b5acfb452fcf30f04.tar.gz
txr-c29c79e0cf015ece40f8e23b5acfb452fcf30f04.tar.bz2
txr-c29c79e0cf015ece40f8e23b5acfb452fcf30f04.zip
ffi: take advantage of hardware unaligned access.
* ffi.c (align_sw_get, align_sw_end, align_sw_put_end, align_sw_put): On Intel, PowerPC and also on ARM if certain compiler options are in effect (set by the user building TXR, not us), define these macros to do nothing. This shrinks and speeds up all the functions which use these macros for handling unaligned accesses.
-rw-r--r--  ffi.c  11
1 file changed, 11 insertions, 0 deletions
diff --git a/ffi.c b/ffi.c
index 53971cc4..2435bc8b 100644
--- a/ffi.c
+++ b/ffi.c
@@ -517,6 +517,15 @@ static void ffi_simple_release(struct txr_ffi_type *tft, val obj,
*loc = 0;
}
+#if __i386__ || __x86_64__ || __PPC64__ || __ARM_FEATURE_UNALIGNED
+
+#define align_sw_get(type, src)
+#define align_sw_end
+#define align_sw_put_end
+#define align_sw_put(type, dst, expr) (expr)
+
+#else
+
#define align_sw_get(type, src) { \
const int al = ((alignof (type) - 1) & coerce(uint_ptr_t, src)) == 0; \
const size_t sz = sizeof (type); \
@@ -543,6 +552,8 @@ static void ffi_simple_release(struct txr_ffi_type *tft, val obj,
} \
}
+#endif
+
#if HAVE_I8
static void ffi_i8_put(struct txr_ffi_type *tft, val n, mem_t *dst, val self)
{