author     Timothy Pearson <kb9vqf@pearsoncomputing.net>   2016-10-01 17:09:04 -0500
committer  Timothy Pearson <kb9vqf@pearsoncomputing.net>   2016-10-01 17:09:04 -0500
commit     e4e92bf2b00ed469141029640f192579c0ba1025 (patch)
tree       6cd4352f84cfe4488277c879b70b5c43fd3fdd90
parent     d8856bdf08c7fcbfe1608b692c632e2023d6dd06 (diff)
download   ulab-e4e92bf2b00ed469141029640f192579c0ba1025.tar.gz
           ulab-e4e92bf2b00ed469141029640f192579c0ba1025.zip
Switch FFTS to linkotec branch for cross-arch support
-rw-r--r--  lib/ffts/CMakeLists.txt | 462
-rw-r--r--  lib/ffts/Makefile.in | 245
-rw-r--r--  lib/ffts/README | 27
-rw-r--r--  lib/ffts/README.md | 35
-rw-r--r--  lib/ffts/aclocal.m4 | 409
-rw-r--r--  lib/ffts/config.h.in | 5
-rwxr-xr-x  lib/ffts/configure | 166
-rw-r--r--  lib/ffts/include/ffts.h | 94
-rw-r--r--  lib/ffts/java/Makefile.in | 212
-rw-r--r--  lib/ffts/java/android/.classpath | 9
-rw-r--r--  lib/ffts/java/android/.project | 40
-rw-r--r--  lib/ffts/java/android/.settings/org.eclipse.jdt.core.prefs | 4
-rw-r--r--  lib/ffts/java/android/.settings/org.eclipse.ltk.core.refactoring.prefs | 2
-rw-r--r--  lib/ffts/java/android/AndroidManifest.xml | 7
-rw-r--r--  lib/ffts/java/android/ant.properties | 18
-rw-r--r--  lib/ffts/java/android/build.xml | 92
-rw-r--r--  lib/ffts/java/android/jni/Android.mk | 25
-rw-r--r--  lib/ffts/java/android/jni/Application.mk | 2
-rw-r--r--  lib/ffts/java/android/proguard-project.txt | 20
-rw-r--r--  lib/ffts/java/android/project.properties | 15
-rw-r--r--  lib/ffts/java/jni/ffts_jni.c | 4
-rw-r--r--  lib/ffts/java/src/nz/ac/waikato/ffts/FFTS.java | 203
-rw-r--r--  lib/ffts/m4/ax_check_class.m4 | 144
-rw-r--r--  lib/ffts/m4/ax_check_java_plugin.m4 | 101
-rw-r--r--  lib/ffts/m4/ax_java_check_class.m4 | 85
-rw-r--r--  lib/ffts/m4/ax_prog_java.m4 | 115
-rw-r--r--  lib/ffts/m4/ax_prog_java_cc.m4 | 104
-rw-r--r--  lib/ffts/m4/ax_prog_java_works.m4 | 134
-rw-r--r--  lib/ffts/m4/ax_prog_javadoc.m4 | 52
-rw-r--r--  lib/ffts/m4/ax_prog_javah.m4 | 43
-rw-r--r--  lib/ffts/m4/ax_try_compile_java.m4 | 55
-rw-r--r--  lib/ffts/m4/ax_try_run_java.m4 | 56
-rw-r--r--  lib/ffts/src/Makefile.am | 4
-rw-r--r--  lib/ffts/src/Makefile.in | 246
-rw-r--r--  lib/ffts/src/arch/.gitignore | 6
-rw-r--r--  lib/ffts/src/arch/ChangeLog | 4805
-rw-r--r--  lib/ffts/src/arch/LICENSE | 21
-rw-r--r--  lib/ffts/src/arch/Makefile.am | 11
-rw-r--r--  lib/ffts/src/arch/README | 7
-rw-r--r--  lib/ffts/src/arch/arm/.gitattributes | 1
-rw-r--r--  lib/ffts/src/arch/arm/.gitignore | 15
-rw-r--r--  lib/ffts/src/arch/arm/Makefile.am | 27
-rw-r--r--  lib/ffts/src/arch/arm/arm-codegen.c | 193
-rw-r--r--  lib/ffts/src/arch/arm/arm-codegen.h | 1127
-rw-r--r--  lib/ffts/src/arch/arm/arm-dis.c | 509
-rw-r--r--  lib/ffts/src/arch/arm/arm-dis.h | 41
-rw-r--r--  lib/ffts/src/arch/arm/arm-vfp-codegen.h | 247
-rw-r--r--  lib/ffts/src/arch/arm/arm-wmmx.h | 177
-rw-r--r--  lib/ffts/src/arch/arm/cmp_macros.th | 56
-rw-r--r--  lib/ffts/src/arch/arm/dpi_macros.th | 112
-rwxr-xr-x  lib/ffts/src/arch/arm/dpiops.sh | 30
-rw-r--r--  lib/ffts/src/arch/arm/mov_macros.th | 121
-rw-r--r--  lib/ffts/src/arch/arm/tramp.c | 710
-rw-r--r--  lib/ffts/src/arch/arm/vfp_macros.th | 15
-rw-r--r--  lib/ffts/src/arch/arm/vfpm_macros.th | 14
-rwxr-xr-x  lib/ffts/src/arch/arm/vfpops.sh | 24
-rw-r--r--  lib/ffts/src/arch/arm64/.gitignore | 6
-rw-r--r--  lib/ffts/src/arch/arm64/Makefile.am | 0
-rw-r--r--  lib/ffts/src/arch/arm64/arm64-codegen.h | 3
-rw-r--r--  lib/ffts/src/arch/ia64/.gitignore | 2
-rw-r--r--  lib/ffts/src/arch/ia64/Makefile.am | 3
-rw-r--r--  lib/ffts/src/arch/ia64/codegen.c | 861
-rw-r--r--  lib/ffts/src/arch/ia64/ia64-codegen.h | 3183
-rw-r--r--  lib/ffts/src/arch/mips/.gitignore | 6
-rw-r--r--  lib/ffts/src/arch/mips/Makefile.am | 8
-rw-r--r--  lib/ffts/src/arch/mips/mips-codegen.h | 435
-rw-r--r--  lib/ffts/src/arch/mips/test.c | 159
-rw-r--r--  lib/ffts/src/arch/ppc/.gitignore | 7
-rw-r--r--  lib/ffts/src/arch/ppc/Makefile.am | 1
-rw-r--r--  lib/ffts/src/arch/ppc/ppc-codegen.h | 953
-rw-r--r--  lib/ffts/src/arch/s390x/.gitignore | 6
-rw-r--r--  lib/ffts/src/arch/s390x/ChangeLog | 35
-rw-r--r--  lib/ffts/src/arch/s390x/Makefile.am | 7
-rw-r--r--  lib/ffts/src/arch/s390x/s390x-codegen.h | 997
-rw-r--r--  lib/ffts/src/arch/s390x/tramp.c | 1149
-rw-r--r--  lib/ffts/src/arch/sparc/.gitignore | 3
-rw-r--r--  lib/ffts/src/arch/sparc/Makefile.am | 7
-rw-r--r--  lib/ffts/src/arch/sparc/sparc-codegen.h | 955
-rw-r--r--  lib/ffts/src/arch/sparc/test.c | 123
-rw-r--r--  lib/ffts/src/arch/sparc/tramp.c | 1080
-rw-r--r--  lib/ffts/src/arch/x64/.gitignore | 4
-rw-r--r--  lib/ffts/src/arch/x64/Makefile.am | 2
-rw-r--r--  lib/ffts/src/arch/x64/x64-codegen.h | 1938
-rw-r--r--  lib/ffts/src/arch/x86/.gitignore | 6
-rw-r--r--  lib/ffts/src/arch/x86/Makefile.am | 1
-rw-r--r--  lib/ffts/src/arch/x86/x86-codegen.h | 2647
-rw-r--r--  lib/ffts/src/codegen.c | 1166
-rw-r--r--  lib/ffts/src/codegen.h | 27
-rw-r--r--  lib/ffts/src/codegen_arm.h | 131
-rw-r--r--  lib/ffts/src/codegen_sse.h | 1819
-rw-r--r--  lib/ffts/src/ffts.c | 867
-rw-r--r--  lib/ffts/src/ffts.h | 177
-rw-r--r--  lib/ffts/src/ffts_attributes.h | 111
-rw-r--r--  lib/ffts/src/ffts_dd.h | 230
-rw-r--r--  lib/ffts/src/ffts_internal.h | 215
-rw-r--r--  lib/ffts/src/ffts_nd.c | 439
-rw-r--r--  lib/ffts/src/ffts_nd.h | 88
-rw-r--r--  lib/ffts/src/ffts_real.c | 830
-rw-r--r--  lib/ffts/src/ffts_real.h | 82
-rw-r--r--  lib/ffts/src/ffts_real_nd.c | 390
-rw-r--r--  lib/ffts/src/ffts_real_nd.h | 83
-rw-r--r--  lib/ffts/src/ffts_small.c | 156
-rw-r--r--  lib/ffts/src/ffts_small.h | 13
-rw-r--r--  lib/ffts/src/ffts_static.c | 1291
-rw-r--r--  lib/ffts/src/ffts_static.h | 119
-rw-r--r--  lib/ffts/src/ffts_transpose.c | 194
-rw-r--r--  lib/ffts/src/ffts_transpose.h | 46
-rw-r--r--  lib/ffts/src/ffts_trig.c | 628
-rw-r--r--  lib/ffts/src/ffts_trig.h | 56
-rw-r--r--  lib/ffts/src/macros-alpha.h | 324
-rw-r--r--  lib/ffts/src/macros-altivec.h | 1
-rw-r--r--  lib/ffts/src/macros-neon.h | 167
-rw-r--r--  lib/ffts/src/macros-sse.h | 140
-rw-r--r--  lib/ffts/src/macros.h | 307
-rw-r--r--  lib/ffts/src/neon.h | 88
-rw-r--r--  lib/ffts/src/neon.s | 1508
-rw-r--r--  lib/ffts/src/neon_float.h | 1126
-rw-r--r--  lib/ffts/src/neon_static.s | 1255
-rw-r--r--  lib/ffts/src/neon_static_f.s | 956
-rw-r--r--  lib/ffts/src/neon_static_i.s | 955
-rw-r--r--  lib/ffts/src/patterns.c | 208
-rw-r--r--  lib/ffts/src/patterns.h | 546
-rw-r--r--  lib/ffts/src/sequitur.h | 448
-rw-r--r--  lib/ffts/src/sse.s | 878
-rw-r--r--  lib/ffts/src/types.h | 28
-rw-r--r--  lib/ffts/src/vfp.h | 1
-rw-r--r--  lib/ffts/src/vfp.s | 99
-rw-r--r--  lib/ffts/tests/Makefile.in | 213
-rw-r--r--  lib/ffts/tests/test.c | 246
129 files changed, 36385 insertions, 8358 deletions
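
For orientation before the file-by-file diff: the vendored library exposes a small plan/execute C API through lib/ffts/include/ffts.h (94 lines in the diffstat above). The sketch below shows how a caller typically drives a forward complex transform. It is a minimal sketch, not part of this commit: the names ffts_init_1d, ffts_execute and ffts_free follow the public FFTS API, but the exact signatures and the 32-byte alignment assumption should be verified against the bundled header.

/* Minimal FFTS usage sketch -- names per the public FFTS API; verify
 * against lib/ffts/include/ffts.h in this tree. Requires C11 for
 * aligned_alloc. */
#include <stdio.h>
#include <stdlib.h>
#include <ffts.h>

int main(void)
{
    size_t N = 16;               /* transform size; FFTS expects a power of two */
    /* Interleaved complex floats: 2*N values (re, im, re, im, ...).
     * The SIMD kernels want aligned buffers; 32 bytes is assumed safe here. */
    float *in  = aligned_alloc(32, 2 * N * sizeof(float));
    float *out = aligned_alloc(32, 2 * N * sizeof(float));
    if (!in || !out)
        return 1;

    for (size_t i = 0; i < 2 * N; i++)
        in[i] = 0.0f;
    in[0] = 1.0f;                /* unit impulse: spectrum should come out all ones */

    ffts_plan_t *p = ffts_init_1d(N, -1);  /* -1 selects the forward transform */
    if (!p)
        return 1;

    ffts_execute(p, in, out);

    for (size_t i = 0; i < N; i++)
        printf("%zu: %f %+fi\n", i, out[2 * i], out[2 * i + 1]);

    ffts_free(p);
    free(in);
    free(out);
    return 0;
}
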
diff --git a/lib/ffts/CMakeLists.txt b/lib/ffts/CMakeLists.txt
new file mode 100644
index 0000000..2028c03
--- /dev/null
+++ b/lib/ffts/CMakeLists.txt
@@ -0,0 +1,462 @@
+cmake_minimum_required(VERSION 2.8.12 FATAL_ERROR)
+
+project(ffts C ASM)
+
+# TODO: to support AutoConfigure building, this should come from a "template" file
+set(FFTS_MAJOR 0)
+set(FFTS_MINOR 9)
+set(FFTS_MICRO 0)
+
+set(FFTS_VERSION "ffts-${FFTS_MAJOR}.${FFTS_MINOR}.${FFTS_MICRO}")
+
+set(CMAKE_MODULE_PATH ${CMAKE_CURRENT_SOURCE_DIR}/cmake)
+set_property(GLOBAL PROPERTY USE_FOLDERS ON)
+
+# the default build type is Debug, which means no optimization
+if(NOT CMAKE_BUILD_TYPE)
+ set(CMAKE_BUILD_TYPE "Release")
+endif(NOT CMAKE_BUILD_TYPE)
+
+# common options
+option(ENABLE_NEON
+ "Enables the use of NEON instructions." OFF
+)
+
+option(ENABLE_VFP
+ "Enables the use of VFP instructions." OFF
+)
+
+option(DISABLE_DYNAMIC_CODE
+ "Disables the use of dynamic machine code generation." OFF
+)
+
+option(GENERATE_POSITION_INDEPENDENT_CODE
+ "Generate position independent code" OFF
+)
+
+option(ENABLE_SHARED
+ "Enable building a shared library." OFF
+)
+
+option(ENABLE_STATIC
+ "Enable building a static library." ON
+)
+
+include(CheckCSourceCompiles)
+include(CheckCSourceRuns)
+include(CheckIncludeFile)
+
+# Ensure FFTS_BUILD is defined when building FFTS (as opposed to using it from
+# another project). Used to export functions from a Windows DLL.
+add_definitions(-DFFTS_BUILD)
+
+# check existence of various headers
+check_include_file(malloc.h HAVE_MALLOC_H)
+check_include_file(stdint.h HAVE_STDINT_H)
+check_include_file(stdlib.h HAVE_STDLIB_H)
+check_include_file(string.h HAVE_STRING_H)
+check_include_file(sys/mman.h HAVE_SYS_MMAN_H)
+check_include_file(unistd.h HAVE_UNISTD_H)
+
+if(HAVE_MALLOC_H)
+ add_definitions(-DHAVE_MALLOC_H)
+endif(HAVE_MALLOC_H)
+
+if(HAVE_STDINT_H)
+ add_definitions(-DHAVE_STDINT_H)
+endif(HAVE_STDINT_H)
+
+if(HAVE_STDLIB_H)
+ add_definitions(-DHAVE_STDLIB_H)
+endif(HAVE_STDLIB_H)
+
+if(HAVE_STRING_H)
+ add_definitions(-DHAVE_STRING_H)
+endif(HAVE_STRING_H)
+
+if(HAVE_SYS_MMAN_H)
+ add_definitions(-DHAVE_SYS_MMAN_H)
+endif(HAVE_SYS_MMAN_H)
+
+if(HAVE_UNISTD_H)
+ add_definitions(-DHAVE_UNISTD_H)
+endif(HAVE_UNISTD_H)
+
+# backup flags
+set(CMAKE_REQUIRED_FLAGS_SAVE ${CMAKE_REQUIRED_FLAGS})
+
+# Determine whether we are cross-compiling
+if(NOT CMAKE_CROSSCOMPILING)
+ if(CMAKE_SYSTEM_PROCESSOR MATCHES "^arm")
+ # Determine the ARM architecture
+
+ # Try to execute quietly without messages
+ set(CMAKE_REQUIRED_QUIET 1)
+
+ # The test for ARM architecture
+ set(TEST_SOURCE_CODE "int main() { return 0; }")
+
+ # GCC documentation says "native" is only supported on Linux, but let's try
+ set(CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS_SAVE} -march=native")
+ check_c_source_runs("${TEST_SOURCE_CODE}" GCC_MARCH_NATIVE_FLAG_SUPPORTED)
+
+ if(NOT GCC_MARCH_NATIVE_FLAG_SUPPORTED)
+ # Fallback trying generic ARMv7
+ set(CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS_SAVE} -march=armv7-a")
+ check_c_source_runs("${TEST_SOURCE_CODE}" GCC_MARCH_ARMV7A_FLAG_SUPPORTED)
+
+ if(NOT GCC_MARCH_ARMV7A_FLAG_SUPPORTED)
+ # Fallback trying generic ARMv6
+ set(CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS_SAVE} -march=armv6")
+ check_c_source_runs("${TEST_SOURCE_CODE}" GCC_MARCH_ARMV6_FLAG_SUPPORTED)
+
+ if(NOT GCC_MARCH_ARMV6_FLAG_SUPPORTED)
+ message(WARNING "FFTS failed to determine the ARM architecture")
+ set(CMAKE_REQUIRED_FLAGS ${CMAKE_REQUIRED_FLAGS_SAVE})
+ else()
+ message("FFTS is build using 'march=armv6'")
+ set(CMAKE_ASM_FLAGS "${CMAKE_ASM_FLAGS} -march=armv6")
+ set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -march=armv6")
+ endif(NOT GCC_MARCH_ARMV6_FLAG_SUPPORTED)
+ else()
+ message("FFTS is build using 'march=armv7-a'")
+ set(CMAKE_ASM_FLAGS "${CMAKE_ASM_FLAGS} -march=armv7-a")
+ set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -march=armv7-a")
+ endif(NOT GCC_MARCH_ARMV7A_FLAG_SUPPORTED)
+ else()
+ message("FFTS is build using 'march=native'")
+ set(CMAKE_ASM_FLAGS "${CMAKE_ASM_FLAGS} -march=native")
+ set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -march=native")
+ endif(NOT GCC_MARCH_NATIVE_FLAG_SUPPORTED)
+
+ # Determine what floating-point hardware (or hardware emulation) is available
+ set(CMAKE_REQUIRED_FLAGS_SAVE ${CMAKE_REQUIRED_FLAGS})
+
+ # The test for ARM NEON support
+ set(TEST_SOURCE_CODE "
+ #include <arm_neon.h>
+ int main()
+ {
+ float32x4_t v;
+ float zeros[4] = {0.0f, 0.0f, 0.0f, 0.0f};
+ v = vld1q_f32(zeros);
+ return 0;
+ }"
+ )
+
+ # Test running with -mfpu=neon and -mfloat-abi=hard
+ set(CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS_SAVE} -mfpu=neon -mfloat-abi=hard")
+ check_c_source_runs("${TEST_SOURCE_CODE}" NEON_HARDFP_SUPPORTED)
+
+ if(NOT NEON_HARDFP_SUPPORTED)
+ # Test running with -mfpu=neon and -mfloat-abi=softfp
+ set(CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS_SAVE} -mfpu=neon -mfloat-abi=softfp")
+ check_c_source_runs("${TEST_SOURCE_CODE}" NEON_SOFTFP_SUPPORTED)
+
+ if(NOT NEON_SOFTFP_SUPPORTED)
+ if(ENABLE_NEON)
+ message(FATAL_ERROR "FFTS cannot enable NEON on this platform")
+ endif(ENABLE_NEON)
+ else()
+ message("FFTS is using 'neon' FPU and 'softfp' float ABI")
+ set(CMAKE_ASM_FLAGS "${CMAKE_ASM_FLAGS} -mfpu=neon -mfloat-abi=softfp")
+ set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mfpu=neon -mfloat-abi=softfp")
+ set(ENABLE_NEON ON)
+ endif(NOT NEON_SOFTFP_SUPPORTED)
+ else()
+ message("FFTS is using 'neon' FPU and 'hard' float ABI")
+ set(CMAKE_ASM_FLAGS "${CMAKE_ASM_FLAGS} -mfpu=neon -mfloat-abi=hard")
+ set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mfpu=neon -mfloat-abi=hard")
+ set(ENABLE_NEON ON)
+ endif(NOT NEON_HARDFP_SUPPORTED)
+
+ # Fallback using VFP if NEON is not supported
+ if(NOT NEON_HARDFP_SUPPORTED AND NOT NEON_SOFTFP_SUPPORTED)
+ # Test for ARM VFP support
+ set(TEST_SOURCE_CODE "
+ double sum(double a, double b)
+ {
+ return a + b;
+ }
+ int main()
+ {
+ double s1, s2, v1 = 1.0, v2 = 2.0, v3 = 1.0e-322;
+ s1 = sum(v1, v2);
+ s2 = sum(v3, v3);
+ return 0;
+ }"
+ )
+
+ # Test running with -mfpu=vfp
+ set(CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS_SAVE} -mfpu=vfp")
+ check_c_source_runs("${TEST_SOURCE_CODE}" VFP_SUPPORTED)
+
+ if(NOT VFP_SUPPORTED)
+ # Fallback using emulation if VFP is not supported
+ if(ENABLE_VFP)
+ message(FATAL_ERROR "FFTS cannot enable VFP on this platform")
+ endif(ENABLE_VFP)
+
+ message(WARNING "FFTS is using 'soft' FPU")
+ else()
+ message("FFTS is using 'vfp' FPU")
+ set(CMAKE_ASM_FLAGS "${CMAKE_ASM_FLAGS} -mfpu=vfp")
+ set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mfpu=vfp")
+ set(CMAKE_REQUIRED_FLAGS_SAVE ${CMAKE_REQUIRED_FLAGS})
+ set(ENABLE_VFP ON)
+ endif(NOT VFP_SUPPORTED)
+
+ # Test running with -mfloat-abi=hard
+ set(CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS_SAVE} -mfloat-abi=hard")
+
+ # Use the same test as before
+ check_c_source_runs("${TEST_SOURCE_CODE}" HARDFP_SUPPORTED)
+
+ if(NOT HARDFP_SUPPORTED)
+ # Test running with -mfloat-abi=softfp
+ set(CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS_SAVE} -mfloat-abi=softfp")
+ check_c_source_runs("${TEST_SOURCE_CODE}" SOFTFP_SUPPORTED)
+
+ if(NOT SOFTFP_SUPPORTED)
+ # Most likely development libraries are missing
+ message(WARNING "FFTS is using 'soft' float ABI")
+ else()
+ message("FFTS is using 'softfp' float ABI")
+ set(CMAKE_ASM_FLAGS "${CMAKE_ASM_FLAGS} -mfloat-abi=softfp")
+ set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mfloat-abi=softfp")
+ endif(NOT SOFTFP_SUPPORTED)
+ else()
+ message("FFTS is using 'hard' float ABI")
+ set(CMAKE_ASM_FLAGS "${CMAKE_ASM_FLAGS} -mfloat-abi=hard")
+ set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mfloat-abi=hard")
+ endif(NOT HARDFP_SUPPORTED)
+ endif(NOT NEON_HARDFP_SUPPORTED AND NOT NEON_SOFTFP_SUPPORTED)
+ else()
+ # enable SSE code generation
+ if(CMAKE_COMPILER_IS_GNUCC)
+ set(CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS_SAVE} -msse")
+ endif(CMAKE_COMPILER_IS_GNUCC)
+
+ # check if the platform has support for SSE intrinsics
+ check_include_file(xmmintrin.h HAVE_XMMINTRIN_H)
+ if(HAVE_XMMINTRIN_H)
+ add_definitions(-DHAVE_SSE)
+ set(CMAKE_REQUIRED_FLAGS_SAVE ${CMAKE_REQUIRED_FLAGS})
+ endif(HAVE_XMMINTRIN_H)
+
+ # enable SSE2 code generation
+ if(CMAKE_COMPILER_IS_GNUCC)
+ set(CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS_SAVE} -msse2")
+ endif(CMAKE_COMPILER_IS_GNUCC)
+
+ # check if the platform has support for SSE2 intrinsics
+ check_include_file(emmintrin.h HAVE_EMMINTRIN_H)
+ if(HAVE_EMMINTRIN_H)
+ add_definitions(-DHAVE_SSE2)
+ set(CMAKE_REQUIRED_FLAGS_SAVE ${CMAKE_REQUIRED_FLAGS})
+ endif(HAVE_EMMINTRIN_H)
+
+ # enable SSE3 code generation
+ if(CMAKE_COMPILER_IS_GNUCC)
+ set(CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS_SAVE} -msse3")
+ endif(CMAKE_COMPILER_IS_GNUCC)
+
+ # check if the platform has support for SSE3 intrinsics
+ check_include_file(pmmintrin.h HAVE_PMMINTRIN_H)
+ if(HAVE_PMMINTRIN_H)
+ add_definitions(-DHAVE_PMMINTRIN_H)
+ add_definitions(-DHAVE_SSE3)
+ set(CMAKE_REQUIRED_FLAGS_SAVE ${CMAKE_REQUIRED_FLAGS})
+ else()
+ # check if the platform has specific intrinsics
+ check_include_file(intrin.h HAVE_INTRIN_H)
+ if(HAVE_INTRIN_H)
+ add_definitions(-DHAVE_INTRIN_H)
+
+ check_c_source_compiles("
+ #include<intrin.h>
+ int main(int argc, char** argv)
+ {
+ (void) argv;
+ (void) argc;
+ return _mm_movemask_ps(_mm_moveldup_ps(_mm_set_ss(1.0f)));
+ }" HAVE__MM_MOVELDUP_PS
+ )
+
+ if(HAVE__MM_MOVELDUP_PS)
+ # assume that we have all SSE3 intrinsics
+ add_definitions(-DHAVE_SSE3)
+ endif(HAVE__MM_MOVELDUP_PS)
+ endif(HAVE_INTRIN_H)
+ endif(HAVE_PMMINTRIN_H)
+ endif(CMAKE_SYSTEM_PROCESSOR MATCHES "^arm")
+else()
+ # TODO: Add detection of compiler support and headers
+endif(NOT CMAKE_CROSSCOMPILING)
+
+# restore flags
+set(CMAKE_REQUIRED_FLAGS ${CMAKE_REQUIRED_FLAGS_SAVE})
+
+# compiler settings
+if(MSVC)
+ # enable all warnings, but disable a few noisy ones
+ set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} /W4 /wd4127")
+
+ # mark debug versions
+ set(CMAKE_DEBUG_POSTFIX "d")
+
+ add_definitions(-D_USE_MATH_DEFINES)
+elseif(CMAKE_COMPILER_IS_GNUCC)
+ include(CheckCCompilerFlag)
+ include(CheckLibraryExists)
+
+ # enable all warnings
+ set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wall -Wextra")
+
+ # check if we can control visibility of symbols
+ check_c_compiler_flag(-fvisibility=hidden HAVE_GCC_VISIBILITY)
+ if(HAVE_GCC_VISIBILITY)
+ set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fvisibility=hidden")
+ add_definitions(-DHAVE_GCC_VISIBILITY)
+ endif(HAVE_GCC_VISIBILITY)
+
+ # some systems need libm for the math functions to work
+ check_library_exists(m pow "" HAVE_LIBM)
+ if(HAVE_LIBM)
+ list(APPEND CMAKE_REQUIRED_LIBRARIES m)
+ list(APPEND FFTS_EXTRA_LIBRARIES m)
+ endif(HAVE_LIBM)
+
+ if(HAVE_PMMINTRIN_H)
+ add_definitions(-msse3)
+ elseif(HAVE_EMMINTRIN_H)
+ add_definitions(-msse2)
+ elseif(HAVE_XMMINTRIN_H)
+ add_definitions(-msse)
+ endif(HAVE_PMMINTRIN_H)
+endif(MSVC)
+
+include_directories(include)
+include_directories(src)
+include_directories(${CMAKE_CURRENT_BINARY_DIR})
+
+set(FFTS_HEADERS
+ include/ffts.h
+)
+
+set(FFTS_SOURCES
+ src/ffts_attributes.h
+ src/ffts.c
+ src/ffts_internal.h
+ src/ffts_nd.c
+ src/ffts_nd.h
+ src/ffts_real.h
+ src/ffts_real.c
+ src/ffts_real_nd.c
+ src/ffts_real_nd.h
+ src/ffts_transpose.c
+ src/ffts_transpose.h
+ src/ffts_trig.c
+ src/ffts_trig.h
+ src/ffts_static.c
+ src/ffts_static.h
+ src/macros.h
+ src/patterns.h
+ src/types.h
+)
+
+if(ENABLE_NEON)
+ list(APPEND FFTS_SOURCES
+ src/neon.s
+ )
+
+ if(DISABLE_DYNAMIC_CODE)
+ list(APPEND FFTS_SOURCES
+ src/neon_static.s
+ )
+ endif(DISABLE_DYNAMIC_CODE)
+
+ add_definitions(-DHAVE_NEON)
+elseif(ENABLE_VFP)
+ if(NOT DISABLE_DYNAMIC_CODE)
+ list(APPEND FFTS_SOURCES
+ src/vfp.s
+ )
+ endif(NOT DISABLE_DYNAMIC_CODE)
+
+ add_definitions(-DHAVE_VFP)
+elseif(HAVE_XMMINTRIN_H)
+ add_definitions(-DHAVE_SSE)
+
+ list(APPEND FFTS_SOURCES
+ src/macros-sse.h
+ )
+
+ if(NOT DISABLE_DYNAMIC_CODE)
+ if(CMAKE_SIZEOF_VOID_P EQUAL 8)
+ list(APPEND FFTS_SOURCES
+ src/codegen_sse.h
+ )
+ else()
+ message(WARNING "Dynamic code is only supported with x64, disabling dynamic code.")
+ set(DISABLE_DYNAMIC_CODE ON)
+ endif(CMAKE_SIZEOF_VOID_P EQUAL 8)
+ endif(NOT DISABLE_DYNAMIC_CODE)
+endif(ENABLE_NEON)
+
+if(DISABLE_DYNAMIC_CODE)
+ add_definitions(-DDYNAMIC_DISABLED)
+else()
+ list(APPEND FFTS_SOURCES
+ src/codegen.c
+ src/codegen.h
+ )
+endif(DISABLE_DYNAMIC_CODE)
+
+if(GENERATE_POSITION_INDEPENDENT_CODE)
+ set(CMAKE_POSITION_INDEPENDENT_CODE ON)
+endif(GENERATE_POSITION_INDEPENDENT_CODE)
+
+if(ENABLE_SHARED)
+ add_library(ffts_shared SHARED
+ ${FFTS_HEADERS}
+ ${FFTS_SOURCES}
+ )
+
+ # On unix-like platforms the library is called "libffts.so" and on Windows "ffts.dll"
+ set_target_properties(ffts_shared PROPERTIES
+ DEFINE_SYMBOL FFTS_SHARED
+ OUTPUT_NAME ffts
+ VERSION ${FFTS_MAJOR}.${FFTS_MINOR}.${FFTS_MICRO}
+ )
+endif(ENABLE_SHARED)
+
+if(ENABLE_STATIC)
+ add_library(ffts_static STATIC
+ ${FFTS_HEADERS}
+ ${FFTS_SOURCES}
+ )
+
+ if(UNIX)
+ # On unix-like platforms the library is called "libffts.a"
+ set_target_properties(ffts_static PROPERTIES OUTPUT_NAME ffts)
+ endif(UNIX)
+endif(ENABLE_STATIC)
+
+if(ENABLE_STATIC OR ENABLE_SHARED)
+ add_executable(ffts_test
+ tests/test.c
+ )
+
+ # link with static library by default
+ if(ENABLE_STATIC)
+ add_library(ffts ALIAS ffts_static)
+ else()
+ add_library(ffts ALIAS ffts_shared)
+ endif(ENABLE_STATIC)
+
+ target_link_libraries(ffts_test
+ ffts
+ ${FFTS_EXTRA_LIBRARIES}
+ )
+endif(ENABLE_STATIC OR ENABLE_SHARED)
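
The FFTS_BUILD definition and the FFTS_SHARED DEFINE_SYMBOL in the CMake script above exist to drive a Windows DLL export macro, paired with the -fvisibility=hidden check for GCC. The sketch below shows the usual pattern; FFTS_API is a hypothetical name for illustration only, and the real macro definitions live in src/ffts_attributes.h.

/* Sketch of the export-macro pattern that FFTS_BUILD and the FFTS_SHARED
 * DEFINE_SYMBOL support. FFTS_API is a hypothetical name; the real macros
 * are defined in src/ffts_attributes.h and may differ in detail. */
#if defined(_WIN32) && defined(FFTS_SHARED)
# if defined(FFTS_BUILD)
#  define FFTS_API __declspec(dllexport)   /* compiling the DLL itself */
# else
#  define FFTS_API __declspec(dllimport)   /* consuming the DLL */
# endif
#elif defined(HAVE_GCC_VISIBILITY)
# define FFTS_API __attribute__((visibility("default")))
#else
# define FFTS_API
#endif

/* Public entry points carry the macro; with -fvisibility=hidden,
 * anything not marked this way stays internal to the library. */
FFTS_API void ffts_example_symbol(void);
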
diff --git a/lib/ffts/Makefile.in b/lib/ffts/Makefile.in
index e13362c..6540453 100644
--- a/lib/ffts/Makefile.in
+++ b/lib/ffts/Makefile.in
@@ -1,7 +1,7 @@
-# Makefile.in generated by automake 1.12.4 from Makefile.am.
+# Makefile.in generated by automake 1.14 from Makefile.am.
# @configure_input@
-# Copyright (C) 1994-2012 Free Software Foundation, Inc.
+# Copyright (C) 1994-2013 Free Software Foundation, Inc.
# This Makefile.in is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
@@ -15,23 +15,51 @@
@SET_MAKE@
VPATH = @srcdir@
-am__make_dryrun = \
- { \
- am__dry=no; \
+am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)'
+am__make_running_with_option = \
+ case $${target_option-} in \
+ ?) ;; \
+ *) echo "am__make_running_with_option: internal error: invalid" \
+ "target option '$${target_option-}' specified" >&2; \
+ exit 1;; \
+ esac; \
+ has_opt=no; \
+ sane_makeflags=$$MAKEFLAGS; \
+ if $(am__is_gnu_make); then \
+ sane_makeflags=$$MFLAGS; \
+ else \
case $$MAKEFLAGS in \
*\\[\ \ ]*) \
- echo 'am--echo: ; @echo "AM" OK' | $(MAKE) -f - 2>/dev/null \
- | grep '^AM OK$$' >/dev/null || am__dry=yes;; \
- *) \
- for am__flg in $$MAKEFLAGS; do \
- case $$am__flg in \
- *=*|--*) ;; \
- *n*) am__dry=yes; break;; \
- esac; \
- done;; \
+ bs=\\; \
+ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
+ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
esac; \
- test $$am__dry = yes; \
- }
+ fi; \
+ skip_next=no; \
+ strip_trailopt () \
+ { \
+ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
+ }; \
+ for flg in $$sane_makeflags; do \
+ test $$skip_next = yes && { skip_next=no; continue; }; \
+ case $$flg in \
+ *=*|--*) continue;; \
+ -*I) strip_trailopt 'I'; skip_next=yes;; \
+ -*I?*) strip_trailopt 'I';; \
+ -*O) strip_trailopt 'O'; skip_next=yes;; \
+ -*O?*) strip_trailopt 'O';; \
+ -*l) strip_trailopt 'l'; skip_next=yes;; \
+ -*l?*) strip_trailopt 'l';; \
+ -[dEDm]) skip_next=yes;; \
+ -[JT]) skip_next=yes;; \
+ esac; \
+ case $$flg in \
+ *$$target_option*) has_opt=yes; break;; \
+ esac; \
+ done; \
+ test $$has_opt = yes
+am__make_dryrun = (target_option=n; $(am__make_running_with_option))
+am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
pkgdatadir = $(datadir)/@PACKAGE@
pkgincludedir = $(includedir)/@PACKAGE@
pkglibdir = $(libdir)/@PACKAGE@
@@ -52,10 +80,11 @@ build_triplet = @build@
host_triplet = @host@
@ENABLE_JNI_TRUE@am__append_1 = java
subdir = .
-DIST_COMMON = README $(am__configure_deps) $(srcdir)/Makefile.am \
- $(srcdir)/Makefile.in $(srcdir)/config.h.in \
- $(srcdir)/ffts.pc.in $(top_srcdir)/configure AUTHORS \
- config.guess config.sub depcomp install-sh ltmain.sh missing
+DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \
+ $(top_srcdir)/configure $(am__configure_deps) \
+ $(srcdir)/config.h.in $(srcdir)/ffts.pc.in AUTHORS README \
+ compile config.guess config.sub depcomp install-sh missing \
+ ltmain.sh
ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
am__aclocal_m4_deps = $(top_srcdir)/m4/ax_check_classpath.m4 \
$(top_srcdir)/m4/ax_check_java_home.m4 \
@@ -73,15 +102,28 @@ mkinstalldirs = $(install_sh) -d
CONFIG_HEADER = config.h
CONFIG_CLEAN_FILES = ffts.pc
CONFIG_CLEAN_VPATH_FILES =
+AM_V_P = $(am__v_P_@AM_V@)
+am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
+am__v_P_0 = false
+am__v_P_1 = :
+AM_V_GEN = $(am__v_GEN_@AM_V@)
+am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
+am__v_GEN_0 = @echo " GEN " $@;
+am__v_GEN_1 =
+AM_V_at = $(am__v_at_@AM_V@)
+am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
+am__v_at_0 = @
+am__v_at_1 =
SOURCES =
DIST_SOURCES =
-RECURSIVE_TARGETS = all-recursive check-recursive dvi-recursive \
- html-recursive info-recursive install-data-recursive \
- install-dvi-recursive install-exec-recursive \
- install-html-recursive install-info-recursive \
- install-pdf-recursive install-ps-recursive install-recursive \
- installcheck-recursive installdirs-recursive pdf-recursive \
- ps-recursive uninstall-recursive
+RECURSIVE_TARGETS = all-recursive check-recursive cscopelist-recursive \
+ ctags-recursive dvi-recursive html-recursive info-recursive \
+ install-data-recursive install-dvi-recursive \
+ install-exec-recursive install-html-recursive \
+ install-info-recursive install-pdf-recursive \
+ install-ps-recursive install-recursive installcheck-recursive \
+ installdirs-recursive pdf-recursive ps-recursive \
+ tags-recursive uninstall-recursive
am__can_run_installinfo = \
case $$AM_UPDATE_INFO_DIR in \
n|no|NO) false;; \
@@ -118,9 +160,30 @@ am__installdirs = "$(DESTDIR)$(pkgconfigdir)"
DATA = $(pkgconfig_DATA)
RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \
distclean-recursive maintainer-clean-recursive
-AM_RECURSIVE_TARGETS = $(RECURSIVE_TARGETS:-recursive=) \
- $(RECURSIVE_CLEAN_TARGETS:-recursive=) tags TAGS ctags CTAGS \
+am__recursive_targets = \
+ $(RECURSIVE_TARGETS) \
+ $(RECURSIVE_CLEAN_TARGETS) \
+ $(am__extra_recursive_targets)
+AM_RECURSIVE_TARGETS = $(am__recursive_targets:-recursive=) TAGS CTAGS \
cscope distdir dist dist-all distcheck
+am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) \
+ $(LISP) config.h.in
+# Read a list of newline-separated strings from the standard input,
+# and print each of them once, without duplicates. Input order is
+# *not* preserved.
+am__uniquify_input = $(AWK) '\
+ BEGIN { nonempty = 0; } \
+ { items[$$0] = 1; nonempty = 1; } \
+ END { if (nonempty) { for (i in items) print i; }; } \
+'
+# Make sure the list of sources is unique. This is necessary because,
+# e.g., the same source file might be shared among _SOURCES variables
+# for different programs/libraries.
+am__define_uniq_tagged_files = \
+ list='$(am__tagged_files)'; \
+ unique=`for i in $$list; do \
+ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+ done | $(am__uniquify_input)`
ETAGS = etags
CTAGS = ctags
CSCOPE = cscope
@@ -169,6 +232,7 @@ am__distuninstallcheck_listfiles = $(distuninstallcheck_listfiles) \
distcleancheck_listfiles = find . -type f -print
ACLOCAL = @ACLOCAL@
AMTAR = @AMTAR@
+AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
AR = @AR@
AUTOCONF = @AUTOCONF@
AUTOHEADER = @AUTOHEADER@
@@ -343,8 +407,8 @@ $(ACLOCAL_M4): $(am__aclocal_m4_deps)
$(am__aclocal_m4_deps):
config.h: stamp-h1
- @if test ! -f $@; then rm -f stamp-h1; else :; fi
- @if test ! -f $@; then $(MAKE) $(AM_MAKEFLAGS) stamp-h1; else :; fi
+ @test -f $@ || rm -f stamp-h1
+ @test -f $@ || $(MAKE) $(AM_MAKEFLAGS) stamp-h1
stamp-h1: $(srcdir)/config.h.in $(top_builddir)/config.status
@rm -f stamp-h1
@@ -395,14 +459,13 @@ uninstall-pkgconfigDATA:
# (1) if the variable is set in 'config.status', edit 'config.status'
# (which will cause the Makefiles to be regenerated when you run 'make');
# (2) otherwise, pass the desired values on the 'make' command line.
-$(RECURSIVE_TARGETS) $(RECURSIVE_CLEAN_TARGETS):
- @fail= failcom='exit 1'; \
- for f in x $$MAKEFLAGS; do \
- case $$f in \
- *=* | --[!k]*);; \
- *k*) failcom='fail=yes';; \
- esac; \
- done; \
+$(am__recursive_targets):
+ @fail=; \
+ if $(am__make_keepgoing); then \
+ failcom='fail=yes'; \
+ else \
+ failcom='exit 1'; \
+ fi; \
dot_seen=no; \
target=`echo $@ | sed s/-recursive//`; \
case "$@" in \
@@ -423,31 +486,13 @@ $(RECURSIVE_TARGETS) $(RECURSIVE_CLEAN_TARGETS):
if test "$$dot_seen" = "no"; then \
$(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \
fi; test -z "$$fail"
-tags-recursive:
- list='$(SUBDIRS)'; for subdir in $$list; do \
- test "$$subdir" = . || ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) tags); \
- done
-ctags-recursive:
- list='$(SUBDIRS)'; for subdir in $$list; do \
- test "$$subdir" = . || ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) ctags); \
- done
-cscopelist-recursive:
- list='$(SUBDIRS)'; for subdir in $$list; do \
- test "$$subdir" = . || ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) cscopelist); \
- done
-ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES)
- list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \
- unique=`for i in $$list; do \
- if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
- done | \
- $(AWK) '{ files[$$0] = 1; nonempty = 1; } \
- END { if (nonempty) { for (i in files) print i; }; }'`; \
- mkid -fID $$unique
-tags: TAGS
-
-TAGS: tags-recursive $(HEADERS) $(SOURCES) config.h.in $(TAGS_DEPENDENCIES) \
- $(TAGS_FILES) $(LISP)
+ID: $(am__tagged_files)
+ $(am__define_uniq_tagged_files); mkid -fID $$unique
+tags: tags-recursive
+TAGS: tags
+
+tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files)
set x; \
here=`pwd`; \
if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \
@@ -463,12 +508,7 @@ TAGS: tags-recursive $(HEADERS) $(SOURCES) config.h.in $(TAGS_DEPENDENCIES) \
set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \
fi; \
done; \
- list='$(SOURCES) $(HEADERS) config.h.in $(LISP) $(TAGS_FILES)'; \
- unique=`for i in $$list; do \
- if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
- done | \
- $(AWK) '{ files[$$0] = 1; nonempty = 1; } \
- END { if (nonempty) { for (i in files) print i; }; }'`; \
+ $(am__define_uniq_tagged_files); \
shift; \
if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \
test -n "$$unique" || unique=$$empty_fix; \
@@ -480,15 +520,11 @@ TAGS: tags-recursive $(HEADERS) $(SOURCES) config.h.in $(TAGS_DEPENDENCIES) \
$$unique; \
fi; \
fi
-ctags: CTAGS
-CTAGS: ctags-recursive $(HEADERS) $(SOURCES) config.h.in $(TAGS_DEPENDENCIES) \
- $(TAGS_FILES) $(LISP)
- list='$(SOURCES) $(HEADERS) config.h.in $(LISP) $(TAGS_FILES)'; \
- unique=`for i in $$list; do \
- if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
- done | \
- $(AWK) '{ files[$$0] = 1; nonempty = 1; } \
- END { if (nonempty) { for (i in files) print i; }; }'`; \
+ctags: ctags-recursive
+
+CTAGS: ctags
+ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files)
+ $(am__define_uniq_tagged_files); \
test -z "$(CTAGS_ARGS)$$unique" \
|| $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \
$$unique
@@ -497,18 +533,16 @@ GTAGS:
here=`$(am__cd) $(top_builddir) && pwd` \
&& $(am__cd) $(top_srcdir) \
&& gtags -i $(GTAGS_ARGS) "$$here"
-
cscope: cscope.files
test ! -s cscope.files \
|| $(CSCOPE) -b -q $(AM_CSCOPEFLAGS) $(CSCOPEFLAGS) -i cscope.files $(CSCOPE_ARGS)
-
clean-cscope:
-rm -f cscope.files
+cscope.files: clean-cscope cscopelist
+cscopelist: cscopelist-recursive
-cscope.files: clean-cscope cscopelist-recursive cscopelist
-
-cscopelist: cscopelist-recursive $(HEADERS) $(SOURCES) $(LISP)
- list='$(SOURCES) $(HEADERS) $(LISP)'; \
+cscopelist-am: $(am__tagged_files)
+ list='$(am__tagged_files)'; \
case "$(srcdir)" in \
[\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \
*) sdir=$(subdir)/$(srcdir) ;; \
@@ -606,10 +640,16 @@ dist-xz: distdir
$(am__post_remove_distdir)
dist-tarZ: distdir
+ @echo WARNING: "Support for distribution archives compressed with" \
+ "legacy program 'compress' is deprecated." >&2
+ @echo WARNING: "It will be removed altogether in Automake 2.0" >&2
tardir=$(distdir) && $(am__tar) | compress -c >$(distdir).tar.Z
$(am__post_remove_distdir)
dist-shar: distdir
+ @echo WARNING: "Support for shar distribution archives is" \
+ "deprecated." >&2
+ @echo WARNING: "It will be removed altogether in Automake 2.0" >&2
shar $(distdir) | GZIP=$(GZIP_ENV) gzip -c >$(distdir).shar.gz
$(am__post_remove_distdir)
@@ -814,27 +854,24 @@ ps-am:
uninstall-am: uninstall-pkgconfigDATA
-.MAKE: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) all \
- cscopelist-recursive ctags-recursive install-am install-strip \
- tags-recursive
-
-.PHONY: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) CTAGS GTAGS \
- all all-am am--refresh check check-am clean clean-cscope \
- clean-generic clean-libtool cscope cscopelist \
- cscopelist-recursive ctags ctags-recursive dist dist-all \
- dist-bzip2 dist-gzip dist-lzip dist-shar dist-tarZ dist-xz \
- dist-zip distcheck distclean distclean-generic distclean-hdr \
- distclean-libtool distclean-tags distcleancheck distdir \
- distuninstallcheck dvi dvi-am html html-am info info-am \
- install install-am install-data install-data-am install-dvi \
- install-dvi-am install-exec install-exec-am install-html \
- install-html-am install-info install-info-am install-man \
- install-pdf install-pdf-am install-pkgconfigDATA install-ps \
- install-ps-am install-strip installcheck installcheck-am \
- installdirs installdirs-am maintainer-clean \
+.MAKE: $(am__recursive_targets) all install-am install-strip
+
+.PHONY: $(am__recursive_targets) CTAGS GTAGS TAGS all all-am \
+ am--refresh check check-am clean clean-cscope clean-generic \
+ clean-libtool cscope cscopelist-am ctags ctags-am dist \
+ dist-all dist-bzip2 dist-gzip dist-lzip dist-shar dist-tarZ \
+ dist-xz dist-zip distcheck distclean distclean-generic \
+ distclean-hdr distclean-libtool distclean-tags distcleancheck \
+ distdir distuninstallcheck dvi dvi-am html html-am info \
+ info-am install install-am install-data install-data-am \
+ install-dvi install-dvi-am install-exec install-exec-am \
+ install-html install-html-am install-info install-info-am \
+ install-man install-pdf install-pdf-am install-pkgconfigDATA \
+ install-ps install-ps-am install-strip installcheck \
+ installcheck-am installdirs installdirs-am maintainer-clean \
maintainer-clean-generic mostlyclean mostlyclean-generic \
- mostlyclean-libtool pdf pdf-am ps ps-am tags tags-recursive \
- uninstall uninstall-am uninstall-pkgconfigDATA
+ mostlyclean-libtool pdf pdf-am ps ps-am tags tags-am uninstall \
+ uninstall-am uninstall-pkgconfigDATA
# Tell versions [3.59,3.63) of GNU make to not export all variables.
diff --git a/lib/ffts/README b/lib/ffts/README
deleted file mode 100644
index d2f320b..0000000
--- a/lib/ffts/README
+++ /dev/null
@@ -1,27 +0,0 @@
-FFTS -- The Fastest Fourier Transform in the South
-by Anthony Blake <anthonix@me.com>
-
-To build for Android, edit and run build_android.sh
-
-To build for iOS, edit and run build_iphone.sh
-
-To build for Linux or OS X on x86, run
-./configure --enable-sse --enable-single --prefix=/usr/local
-make
-make install
-
-FFTS dynamically generates code at runtime. This can be disabled with
---disable-dynamic-code
-
-For JNI targets: --enable-jni will build the jni stuff automatically for
-the host target, and --enable-shared must also be added manually for it to
-work.
-
-If you like FFTS, please show your support by sending a postcard to:
-
-Anthony Blake
-Department of Computer Science
-The University of Waikato
-Private Bag 3105
-Hamilton 3240
-NEW ZEALAND
diff --git a/lib/ffts/README.md b/lib/ffts/README.md
new file mode 100644
index 0000000..50fb60e
--- /dev/null
+++ b/lib/ffts/README.md
@@ -0,0 +1,35 @@
+# FFTS -- The Fastest Fourier Transform in the South
+
+[![Build Status](https://travis-ci.org/linkotec/ffts.svg?branch=master)](https://travis-ci.org/linkotec/ffts)
+
+To build for Android, edit and run build_android.sh
+
+To build for iOS, edit and run build_iphone.sh
+
+To build for Linux or OS X on x86, run
+ ./configure --enable-sse --enable-single --prefix=/usr/local
+ make
+ make install
+
+Optionally, to build for Windows or Linux with CMake, run
+ mkdir build
+ cd build
+ cmake ..
+
+FFTS dynamically generates code at runtime. This can be disabled with
+--disable-dynamic-code
+
+Note that 32-bit x86 dynamic machine code generation is not supported at the moment.
+
+For JNI targets: --enable-jni will build the JNI bindings automatically for
+the host target; --enable-shared must also be added manually for them to
+work.
+
+If you like FFTS, please show your support by sending a postcard to:
+
+Anthony Blake<br>
+Department of Computer Science<br>
+The University of Waikato<br>
+Private Bag 3105<br>
+Hamilton 3240<br>
+NEW ZEALAND
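
The configure line in the README enables single-precision transforms; the sources added by this commit (src/ffts_real.c, src/ffts_real_nd.c in the diffstat) also provide real-to-complex plans. A minimal sketch follows, assuming the commonly documented ffts_init_1d_real signature; verify it against lib/ffts/include/ffts.h.

/* Real-to-complex forward transform sketch -- signature assumed from the
 * public FFTS API; verify against lib/ffts/include/ffts.h. */
#include <stdlib.h>
#include <ffts.h>

int real_fft_demo(void)
{
    size_t N = 64;
    float *in  = aligned_alloc(32, N * sizeof(float));     /* N real samples */
    /* Output is N/2+1 interleaved complex bins; over-allocate to 2*N floats
     * so the aligned_alloc size stays a multiple of the alignment. */
    float *out = aligned_alloc(32, 2 * N * sizeof(float));
    if (!in || !out)
        return -1;

    for (size_t i = 0; i < N; i++)
        in[i] = (float)i;

    ffts_plan_t *p = ffts_init_1d_real(N, -1);  /* -1 = forward */
    if (!p) {
        free(in);
        free(out);
        return -1;
    }

    ffts_execute(p, in, out);  /* writes N/2+1 complex values into out */

    ffts_free(p);
    free(in);
    free(out);
    return 0;
}
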
diff --git a/lib/ffts/aclocal.m4 b/lib/ffts/aclocal.m4
index 52a5dff..2c0f014 100644
--- a/lib/ffts/aclocal.m4
+++ b/lib/ffts/aclocal.m4
@@ -1,6 +1,6 @@
-# generated automatically by aclocal 1.12.4 -*- Autoconf -*-
+# generated automatically by aclocal 1.14 -*- Autoconf -*-
-# Copyright (C) 1996-2012 Free Software Foundation, Inc.
+# Copyright (C) 1996-2013 Free Software Foundation, Inc.
# This file is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
@@ -11,6 +11,7 @@
# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE.
+m4_ifndef([AC_CONFIG_MACRO_DIRS], [m4_defun([_AM_CONFIG_MACRO_DIRS], [])m4_defun([AC_CONFIG_MACRO_DIRS], [_AM_CONFIG_MACRO_DIRS($@)])])
m4_ifndef([AC_AUTOCONF_VERSION],
[m4_copy([m4_PACKAGE_VERSION], [AC_AUTOCONF_VERSION])])dnl
m4_if(m4_defn([AC_AUTOCONF_VERSION]), [2.69],,
@@ -8606,7 +8607,7 @@ m4_ifndef([_LT_PROG_F77], [AC_DEFUN([_LT_PROG_F77])])
m4_ifndef([_LT_PROG_FC], [AC_DEFUN([_LT_PROG_FC])])
m4_ifndef([_LT_PROG_CXX], [AC_DEFUN([_LT_PROG_CXX])])
-# Copyright (C) 2002-2012 Free Software Foundation, Inc.
+# Copyright (C) 2002-2013 Free Software Foundation, Inc.
#
# This file is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
@@ -8618,10 +8619,10 @@ m4_ifndef([_LT_PROG_CXX], [AC_DEFUN([_LT_PROG_CXX])])
# generated from the m4 files accompanying Automake X.Y.
# (This private macro should not be called outside this file.)
AC_DEFUN([AM_AUTOMAKE_VERSION],
-[am__api_version='1.12'
+[am__api_version='1.14'
dnl Some users find AM_AUTOMAKE_VERSION and mistake it for a way to
dnl require some minimum version. Point them to the right macro.
-m4_if([$1], [1.12.4], [],
+m4_if([$1], [1.14], [],
[AC_FATAL([Do not call $0, use AM_INIT_AUTOMAKE([$1]).])])dnl
])
@@ -8637,14 +8638,14 @@ m4_define([_AM_AUTOCONF_VERSION], [])
# Call AM_AUTOMAKE_VERSION and AM_AUTOMAKE_VERSION so they can be traced.
# This function is AC_REQUIREd by AM_INIT_AUTOMAKE.
AC_DEFUN([AM_SET_CURRENT_AUTOMAKE_VERSION],
-[AM_AUTOMAKE_VERSION([1.12.4])dnl
+[AM_AUTOMAKE_VERSION([1.14])dnl
m4_ifndef([AC_AUTOCONF_VERSION],
[m4_copy([m4_PACKAGE_VERSION], [AC_AUTOCONF_VERSION])])dnl
_AM_AUTOCONF_VERSION(m4_defn([AC_AUTOCONF_VERSION]))])
# Figure out how to run the assembler. -*- Autoconf -*-
-# Copyright (C) 2001-2012 Free Software Foundation, Inc.
+# Copyright (C) 2001-2013 Free Software Foundation, Inc.
#
# This file is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
@@ -8664,7 +8665,7 @@ _AM_IF_OPTION([no-dependencies],, [_AM_DEPENDENCIES([CCAS])])dnl
# AM_AUX_DIR_EXPAND -*- Autoconf -*-
-# Copyright (C) 2001-2012 Free Software Foundation, Inc.
+# Copyright (C) 2001-2013 Free Software Foundation, Inc.
#
# This file is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
@@ -8717,7 +8718,7 @@ am_aux_dir=`cd $ac_aux_dir && pwd`
# AM_CONDITIONAL -*- Autoconf -*-
-# Copyright (C) 1997-2012 Free Software Foundation, Inc.
+# Copyright (C) 1997-2013 Free Software Foundation, Inc.
#
# This file is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
@@ -8748,7 +8749,7 @@ AC_CONFIG_COMMANDS_PRE(
Usually this means the macro was only invoked conditionally.]])
fi])])
-# Copyright (C) 1999-2012 Free Software Foundation, Inc.
+# Copyright (C) 1999-2013 Free Software Foundation, Inc.
#
# This file is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
@@ -8939,7 +8940,7 @@ _AM_SUBST_NOTMAKE([am__nodep])dnl
# Generate code to set up dependency tracking. -*- Autoconf -*-
-# Copyright (C) 1999-2012 Free Software Foundation, Inc.
+# Copyright (C) 1999-2013 Free Software Foundation, Inc.
#
# This file is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
@@ -8950,7 +8951,7 @@ _AM_SUBST_NOTMAKE([am__nodep])dnl
# ------------------------------
AC_DEFUN([_AM_OUTPUT_DEPENDENCY_COMMANDS],
[{
- # Autoconf 2.62 quotes --file arguments for eval, but not when files
+ # Older Autoconf quotes --file arguments for eval, but not when files
# are listed without --file. Let's play safe and only enable the eval
# if we detect the quoting.
case $CONFIG_FILES in
@@ -8979,7 +8980,7 @@ AC_DEFUN([_AM_OUTPUT_DEPENDENCY_COMMANDS],
DEPDIR=`sed -n 's/^DEPDIR = //p' < "$mf"`
test -z "$DEPDIR" && continue
am__include=`sed -n 's/^am__include = //p' < "$mf"`
- test -z "am__include" && continue
+ test -z "$am__include" && continue
am__quote=`sed -n 's/^am__quote = //p' < "$mf"`
# Find all dependency output files, they are included files with
# $(DEPDIR) in their names. We invoke sed twice because it is the
@@ -9015,7 +9016,7 @@ AC_DEFUN([AM_OUTPUT_DEPENDENCY_COMMANDS],
# Do all the work for Automake. -*- Autoconf -*-
-# Copyright (C) 1996-2012 Free Software Foundation, Inc.
+# Copyright (C) 1996-2013 Free Software Foundation, Inc.
#
# This file is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
@@ -9024,6 +9025,12 @@ AC_DEFUN([AM_OUTPUT_DEPENDENCY_COMMANDS],
# This macro actually does too much. Some checks are only needed if
# your package does certain things. But this isn't really a big deal.
+dnl Redefine AC_PROG_CC to automatically invoke _AM_PROG_CC_C_O.
+m4_define([AC_PROG_CC],
+m4_defn([AC_PROG_CC])
+[_AM_PROG_CC_C_O
+])
+
# AM_INIT_AUTOMAKE(PACKAGE, VERSION, [NO-DEFINE])
# AM_INIT_AUTOMAKE([OPTIONS])
# -----------------------------------------------
@@ -9036,7 +9043,7 @@ AC_DEFUN([AM_OUTPUT_DEPENDENCY_COMMANDS],
# arguments mandatory, and then we can depend on a new Autoconf
# release and drop the old call support.
AC_DEFUN([AM_INIT_AUTOMAKE],
-[AC_PREREQ([2.62])dnl
+[AC_PREREQ([2.65])dnl
dnl Autoconf wants to disallow AM_ names. We explicitly allow
dnl the ones we care about.
m4_pattern_allow([^AM_[A-Z]+FLAGS$])dnl
@@ -9066,8 +9073,7 @@ AC_SUBST([CYGPATH_W])
dnl Distinguish between old-style and new-style calls.
m4_ifval([$2],
[AC_DIAGNOSE([obsolete],
-[$0: two- and three-arguments forms are deprecated. For more info, see:
-http://www.gnu.org/software/automake/manual/automake.html#Modernize-AM_INIT_AUTOMAKE-invocation])
+ [$0: two- and three-arguments forms are deprecated.])
m4_ifval([$3], [_AM_SET_OPTION([no-define])])dnl
AC_SUBST([PACKAGE], [$1])dnl
AC_SUBST([VERSION], [$2])],
@@ -9121,22 +9127,60 @@ AC_PROVIDE_IFELSE([AC_PROG_OBJC],
[_AM_DEPENDENCIES([OBJC])],
[m4_define([AC_PROG_OBJC],
m4_defn([AC_PROG_OBJC])[_AM_DEPENDENCIES([OBJC])])])dnl
-dnl Support for Objective C++ was only introduced in Autoconf 2.65,
-dnl but we still cater to Autoconf 2.62.
-m4_ifdef([AC_PROG_OBJCXX],
-[AC_PROVIDE_IFELSE([AC_PROG_OBJCXX],
+AC_PROVIDE_IFELSE([AC_PROG_OBJCXX],
[_AM_DEPENDENCIES([OBJCXX])],
[m4_define([AC_PROG_OBJCXX],
- m4_defn([AC_PROG_OBJCXX])[_AM_DEPENDENCIES([OBJCXX])])])])dnl
+ m4_defn([AC_PROG_OBJCXX])[_AM_DEPENDENCIES([OBJCXX])])])dnl
])
-_AM_IF_OPTION([silent-rules], [AC_REQUIRE([AM_SILENT_RULES])])dnl
-dnl The 'parallel-tests' driver may need to know about EXEEXT, so add the
-dnl 'am__EXEEXT' conditional if _AM_COMPILER_EXEEXT was seen. This macro
-dnl is hooked onto _AC_COMPILER_EXEEXT early, see below.
+AC_REQUIRE([AM_SILENT_RULES])dnl
+dnl The testsuite driver may need to know about EXEEXT, so add the
+dnl 'am__EXEEXT' conditional if _AM_COMPILER_EXEEXT was seen. This
+dnl macro is hooked onto _AC_COMPILER_EXEEXT early, see below.
AC_CONFIG_COMMANDS_PRE(dnl
[m4_provide_if([_AM_COMPILER_EXEEXT],
[AM_CONDITIONAL([am__EXEEXT], [test -n "$EXEEXT"])])])dnl
-])
+
+# POSIX will say in a future version that running "rm -f" with no argument
+# is OK; and we want to be able to make that assumption in our Makefile
+# recipes. So use an aggressive probe to check that the usage we want is
+# actually supported "in the wild" to an acceptable degree.
+# See automake bug#10828.
+# To make any issue more visible, cause the running configure to be aborted
+# by default if the 'rm' program in use doesn't match our expectations; the
+# user can still override this though.
+if rm -f && rm -fr && rm -rf; then : OK; else
+ cat >&2 <<'END'
+Oops!
+
+Your 'rm' program seems unable to run without file operands specified
+on the command line, even when the '-f' option is present. This is contrary
+to the behaviour of most rm programs out there, and not conforming with
+the upcoming POSIX standard: <http://austingroupbugs.net/view.php?id=542>
+
+Please tell bug-automake@gnu.org about your system, including the value
+of your $PATH and any error possibly output before this message. This
+can help us improve future automake versions.
+
+END
+ if test x"$ACCEPT_INFERIOR_RM_PROGRAM" = x"yes"; then
+ echo 'Configuration will proceed anyway, since you have set the' >&2
+ echo 'ACCEPT_INFERIOR_RM_PROGRAM variable to "yes"' >&2
+ echo >&2
+ else
+ cat >&2 <<'END'
+Aborting the configuration process, to ensure you take notice of the issue.
+
+You can download and install GNU coreutils to get an 'rm' implementation
+that behaves properly: <http://www.gnu.org/software/coreutils/>.
+
+If you want to complete the configuration process using your problematic
+'rm' anyway, export the environment variable ACCEPT_INFERIOR_RM_PROGRAM
+to "yes", and re-run configure.
+
+END
+ AC_MSG_ERROR([Your 'rm' program is bad, sorry.])
+ fi
+fi])
dnl Hook into '_AC_COMPILER_EXEEXT' early to learn its expansion. Do not
dnl add the conditional right here, as _AC_COMPILER_EXEEXT may be further
@@ -9144,7 +9188,6 @@ dnl mangled by Autoconf and run in a shell conditional statement.
m4_define([_AC_COMPILER_EXEEXT],
m4_defn([_AC_COMPILER_EXEEXT])[m4_provide([_AM_COMPILER_EXEEXT])])
-
# When config.status generates a header, we must update the stamp-h file.
# This file resides in the same directory as the config header
# that is generated. The stamp files are numbered to have different names.
@@ -9166,7 +9209,7 @@ for _am_header in $config_headers :; do
done
echo "timestamp for $_am_arg" >`AS_DIRNAME(["$_am_arg"])`/stamp-h[]$_am_stamp_count])
-# Copyright (C) 2001-2012 Free Software Foundation, Inc.
+# Copyright (C) 2001-2013 Free Software Foundation, Inc.
#
# This file is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
@@ -9187,7 +9230,7 @@ if test x"${install_sh}" != xset; then
fi
AC_SUBST([install_sh])])
-# Copyright (C) 2003-2012 Free Software Foundation, Inc.
+# Copyright (C) 2003-2013 Free Software Foundation, Inc.
#
# This file is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
@@ -9208,7 +9251,7 @@ AC_SUBST([am__leading_dot])])
# Check to see how 'make' treats includes. -*- Autoconf -*-
-# Copyright (C) 2001-2012 Free Software Foundation, Inc.
+# Copyright (C) 2001-2013 Free Software Foundation, Inc.
#
# This file is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
@@ -9258,7 +9301,7 @@ rm -f confinc confmf
# Fake the existence of programs that GNU maintainers use. -*- Autoconf -*-
-# Copyright (C) 1997-2012 Free Software Foundation, Inc.
+# Copyright (C) 1997-2013 Free Software Foundation, Inc.
#
# This file is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
@@ -9273,8 +9316,8 @@ AC_SUBST($1)])
# AM_MISSING_HAS_RUN
# ------------------
-# Define MISSING if not defined so far and test if it supports --run.
-# If it does, set am_missing_run to use it, otherwise, to nothing.
+# Define MISSING if not defined so far and test if it is modern enough.
+# If it is, set am_missing_run to use it, otherwise, to nothing.
AC_DEFUN([AM_MISSING_HAS_RUN],
[AC_REQUIRE([AM_AUX_DIR_EXPAND])dnl
AC_REQUIRE_AUX_FILE([missing])dnl
@@ -9287,8 +9330,8 @@ if test x"${MISSING+set}" != xset; then
esac
fi
# Use eval to expand $SHELL
-if eval "$MISSING --run true"; then
- am_missing_run="$MISSING --run "
+if eval "$MISSING --is-lightweight"; then
+ am_missing_run="$MISSING "
else
am_missing_run=
AC_MSG_WARN(['missing' script is too old or missing])
@@ -9297,7 +9340,7 @@ fi
# Helper functions for option handling. -*- Autoconf -*-
-# Copyright (C) 2001-2012 Free Software Foundation, Inc.
+# Copyright (C) 2001-2013 Free Software Foundation, Inc.
#
# This file is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
@@ -9326,9 +9369,73 @@ AC_DEFUN([_AM_SET_OPTIONS],
AC_DEFUN([_AM_IF_OPTION],
[m4_ifset(_AM_MANGLE_OPTION([$1]), [$2], [$3])])
+# Copyright (C) 1999-2013 Free Software Foundation, Inc.
+#
+# This file is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# _AM_PROG_CC_C_O
+# ---------------
+# Like AC_PROG_CC_C_O, but changed for automake. We rewrite AC_PROG_CC
+# to automatically call this.
+AC_DEFUN([_AM_PROG_CC_C_O],
+[AC_REQUIRE([AM_AUX_DIR_EXPAND])dnl
+AC_REQUIRE_AUX_FILE([compile])dnl
+AC_LANG_PUSH([C])dnl
+AC_CACHE_CHECK(
+ [whether $CC understands -c and -o together],
+ [am_cv_prog_cc_c_o],
+ [AC_LANG_CONFTEST([AC_LANG_PROGRAM([])])
+ # Make sure it works both with $CC and with simple cc.
+ # Following AC_PROG_CC_C_O, we do the test twice because some
+ # compilers refuse to overwrite an existing .o file with -o,
+ # though they will create one.
+ am_cv_prog_cc_c_o=yes
+ for am_i in 1 2; do
+ if AM_RUN_LOG([$CC -c conftest.$ac_ext -o conftest2.$ac_objext]) \
+ && test -f conftest2.$ac_objext; then
+ : OK
+ else
+ am_cv_prog_cc_c_o=no
+ break
+ fi
+ done
+ rm -f core conftest*
+ unset am_i])
+if test "$am_cv_prog_cc_c_o" != yes; then
+ # Losing compiler, so override with the script.
+ # FIXME: It is wrong to rewrite CC.
+ # But if we don't then we get into trouble of one sort or another.
+ # A longer-term fix would be to have automake use am__CC in this case,
+ # and then we could set am__CC="\$(top_srcdir)/compile \$(CC)"
+ CC="$am_aux_dir/compile $CC"
+fi
+AC_LANG_POP([C])])
+
+# For backward compatibility.
+AC_DEFUN_ONCE([AM_PROG_CC_C_O], [AC_REQUIRE([AC_PROG_CC])])
+
+# Copyright (C) 2001-2013 Free Software Foundation, Inc.
+#
+# This file is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# AM_RUN_LOG(COMMAND)
+# -------------------
+# Run COMMAND, save the exit status in ac_status, and log it.
+# (This has been adapted from Autoconf's _AC_RUN_LOG macro.)
+AC_DEFUN([AM_RUN_LOG],
+[{ echo "$as_me:$LINENO: $1" >&AS_MESSAGE_LOG_FD
+ ($1) >&AS_MESSAGE_LOG_FD 2>&AS_MESSAGE_LOG_FD
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&AS_MESSAGE_LOG_FD
+ (exit $ac_status); }])
+
# Check to make sure that the build environment is sane. -*- Autoconf -*-
-# Copyright (C) 1996-2012 Free Software Foundation, Inc.
+# Copyright (C) 1996-2013 Free Software Foundation, Inc.
#
# This file is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
@@ -9409,7 +9516,67 @@ AC_CONFIG_COMMANDS_PRE(
rm -f conftest.file
])
-# Copyright (C) 2001-2012 Free Software Foundation, Inc.
+# Copyright (C) 2009-2013 Free Software Foundation, Inc.
+#
+# This file is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# AM_SILENT_RULES([DEFAULT])
+# --------------------------
+# Enable less verbose build rules; with the default set to DEFAULT
+# ("yes" being less verbose, "no" or empty being verbose).
+AC_DEFUN([AM_SILENT_RULES],
+[AC_ARG_ENABLE([silent-rules], [dnl
+AS_HELP_STRING(
+ [--enable-silent-rules],
+ [less verbose build output (undo: "make V=1")])
+AS_HELP_STRING(
+ [--disable-silent-rules],
+ [verbose build output (undo: "make V=0")])dnl
+])
+case $enable_silent_rules in @%:@ (((
+ yes) AM_DEFAULT_VERBOSITY=0;;
+ no) AM_DEFAULT_VERBOSITY=1;;
+ *) AM_DEFAULT_VERBOSITY=m4_if([$1], [yes], [0], [1]);;
+esac
+dnl
+dnl A few 'make' implementations (e.g., NonStop OS and NextStep)
+dnl do not support nested variable expansions.
+dnl See automake bug#9928 and bug#10237.
+am_make=${MAKE-make}
+AC_CACHE_CHECK([whether $am_make supports nested variables],
+ [am_cv_make_support_nested_variables],
+ [if AS_ECHO([['TRUE=$(BAR$(V))
+BAR0=false
+BAR1=true
+V=1
+am__doit:
+ @$(TRUE)
+.PHONY: am__doit']]) | $am_make -f - >/dev/null 2>&1; then
+ am_cv_make_support_nested_variables=yes
+else
+ am_cv_make_support_nested_variables=no
+fi])
+if test $am_cv_make_support_nested_variables = yes; then
+ dnl Using '$V' instead of '$(V)' breaks IRIX make.
+ AM_V='$(V)'
+ AM_DEFAULT_V='$(AM_DEFAULT_VERBOSITY)'
+else
+ AM_V=$AM_DEFAULT_VERBOSITY
+ AM_DEFAULT_V=$AM_DEFAULT_VERBOSITY
+fi
+AC_SUBST([AM_V])dnl
+AM_SUBST_NOTMAKE([AM_V])dnl
+AC_SUBST([AM_DEFAULT_V])dnl
+AM_SUBST_NOTMAKE([AM_DEFAULT_V])dnl
+AC_SUBST([AM_DEFAULT_VERBOSITY])dnl
+AM_BACKSLASH='\'
+AC_SUBST([AM_BACKSLASH])dnl
+_AM_SUBST_NOTMAKE([AM_BACKSLASH])dnl
+])
+
+# Copyright (C) 2001-2013 Free Software Foundation, Inc.
#
# This file is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
@@ -9437,7 +9604,7 @@ fi
INSTALL_STRIP_PROGRAM="\$(install_sh) -c -s"
AC_SUBST([INSTALL_STRIP_PROGRAM])])
-# Copyright (C) 2006-2012 Free Software Foundation, Inc.
+# Copyright (C) 2006-2013 Free Software Foundation, Inc.
#
# This file is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
@@ -9456,7 +9623,7 @@ AC_DEFUN([AM_SUBST_NOTMAKE], [_AM_SUBST_NOTMAKE($@)])
# Check how to create a tarball. -*- Autoconf -*-
-# Copyright (C) 2004-2012 Free Software Foundation, Inc.
+# Copyright (C) 2004-2013 Free Software Foundation, Inc.
#
# This file is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
@@ -9475,76 +9642,114 @@ AC_DEFUN([AM_SUBST_NOTMAKE], [_AM_SUBST_NOTMAKE($@)])
# Substitute a variable $(am__untar) that extract such
# a tarball read from stdin.
# $(am__untar) < result.tar
+#
AC_DEFUN([_AM_PROG_TAR],
[# Always define AMTAR for backward compatibility. Yes, it's still used
# in the wild :-( We should find a proper way to deprecate it ...
AC_SUBST([AMTAR], ['$${TAR-tar}'])
-m4_if([$1], [v7],
- [am__tar='$${TAR-tar} chof - "$$tardir"' am__untar='$${TAR-tar} xf -'],
- [m4_case([$1], [ustar],, [pax],,
- [m4_fatal([Unknown tar format])])
-AC_MSG_CHECKING([how to create a $1 tar archive])
-# Loop over all known methods to create a tar archive until one works.
+
+# We'll loop over all known methods to create a tar archive until one works.
_am_tools='gnutar m4_if([$1], [ustar], [plaintar]) pax cpio none'
-_am_tools=${am_cv_prog_tar_$1-$_am_tools}
-# Do not fold the above two line into one, because Tru64 sh and
-# Solaris sh will not grok spaces in the rhs of '-'.
-for _am_tool in $_am_tools
-do
- case $_am_tool in
- gnutar)
- for _am_tar in tar gnutar gtar;
- do
- AM_RUN_LOG([$_am_tar --version]) && break
- done
- am__tar="$_am_tar --format=m4_if([$1], [pax], [posix], [$1]) -chf - "'"$$tardir"'
- am__tar_="$_am_tar --format=m4_if([$1], [pax], [posix], [$1]) -chf - "'"$tardir"'
- am__untar="$_am_tar -xf -"
- ;;
- plaintar)
- # Must skip GNU tar: if it does not support --format= it doesn't create
- # ustar tarball either.
- (tar --version) >/dev/null 2>&1 && continue
- am__tar='tar chf - "$$tardir"'
- am__tar_='tar chf - "$tardir"'
- am__untar='tar xf -'
- ;;
- pax)
- am__tar='pax -L -x $1 -w "$$tardir"'
- am__tar_='pax -L -x $1 -w "$tardir"'
- am__untar='pax -r'
- ;;
- cpio)
- am__tar='find "$$tardir" -print | cpio -o -H $1 -L'
- am__tar_='find "$tardir" -print | cpio -o -H $1 -L'
- am__untar='cpio -i -H $1 -d'
- ;;
- none)
- am__tar=false
- am__tar_=false
- am__untar=false
- ;;
- esac
- # If the value was cached, stop now. We just wanted to have am__tar
- # and am__untar set.
- test -n "${am_cv_prog_tar_$1}" && break
+m4_if([$1], [v7],
+ [am__tar='$${TAR-tar} chof - "$$tardir"' am__untar='$${TAR-tar} xf -'],
+
+ [m4_case([$1],
+ [ustar],
+ [# The POSIX 1988 'ustar' format is defined with fixed-size fields.
+ # There is notably a 21-bit limit for the UID and the GID. In fact,
+ # the 'pax' utility can hang on bigger UID/GID (see automake bug#8343
+ # and bug#13588).
+ am_max_uid=2097151 # 2^21 - 1
+ am_max_gid=$am_max_uid
+ # The $UID and $GID variables are not portable, so we need to resort
+ # to the POSIX-mandated id(1) utility. Errors in the 'id' calls
+ # below are definitely unexpected, so allow the users to see them
+ # (that is, avoid stderr redirection).
+ am_uid=`id -u || echo unknown`
+ am_gid=`id -g || echo unknown`
+ AC_MSG_CHECKING([whether UID '$am_uid' is supported by ustar format])
+ if test $am_uid -le $am_max_uid; then
+ AC_MSG_RESULT([yes])
+ else
+ AC_MSG_RESULT([no])
+ _am_tools=none
+ fi
+ AC_MSG_CHECKING([whether GID '$am_gid' is supported by ustar format])
+ if test $am_gid -le $am_max_gid; then
+ AC_MSG_RESULT([yes])
+ else
+ AC_MSG_RESULT([no])
+ _am_tools=none
+ fi],
+
+ [pax],
+ [],
- # tar/untar a dummy directory, and stop if the command works
- rm -rf conftest.dir
- mkdir conftest.dir
- echo GrepMe > conftest.dir/file
- AM_RUN_LOG([tardir=conftest.dir && eval $am__tar_ >conftest.tar])
+ [m4_fatal([Unknown tar format])])
+
+ AC_MSG_CHECKING([how to create a $1 tar archive])
+
+ # Go ahead even if we have the value already cached. We do so because we
+ # need to set the values for the 'am__tar' and 'am__untar' variables.
+ _am_tools=${am_cv_prog_tar_$1-$_am_tools}
+
+ for _am_tool in $_am_tools; do
+ case $_am_tool in
+ gnutar)
+ for _am_tar in tar gnutar gtar; do
+ AM_RUN_LOG([$_am_tar --version]) && break
+ done
+ am__tar="$_am_tar --format=m4_if([$1], [pax], [posix], [$1]) -chf - "'"$$tardir"'
+ am__tar_="$_am_tar --format=m4_if([$1], [pax], [posix], [$1]) -chf - "'"$tardir"'
+ am__untar="$_am_tar -xf -"
+ ;;
+ plaintar)
+      # Must skip GNU tar: if it does not support --format= it doesn't create
+      # a ustar tarball either.
+ (tar --version) >/dev/null 2>&1 && continue
+ am__tar='tar chf - "$$tardir"'
+ am__tar_='tar chf - "$tardir"'
+ am__untar='tar xf -'
+ ;;
+ pax)
+ am__tar='pax -L -x $1 -w "$$tardir"'
+ am__tar_='pax -L -x $1 -w "$tardir"'
+ am__untar='pax -r'
+ ;;
+ cpio)
+ am__tar='find "$$tardir" -print | cpio -o -H $1 -L'
+ am__tar_='find "$tardir" -print | cpio -o -H $1 -L'
+ am__untar='cpio -i -H $1 -d'
+ ;;
+ none)
+ am__tar=false
+ am__tar_=false
+ am__untar=false
+ ;;
+ esac
+
+ # If the value was cached, stop now. We just wanted to have am__tar
+ # and am__untar set.
+ test -n "${am_cv_prog_tar_$1}" && break
+
+ # tar/untar a dummy directory, and stop if the command works.
+ rm -rf conftest.dir
+ mkdir conftest.dir
+ echo GrepMe > conftest.dir/file
+ AM_RUN_LOG([tardir=conftest.dir && eval $am__tar_ >conftest.tar])
+ rm -rf conftest.dir
+ if test -s conftest.tar; then
+ AM_RUN_LOG([$am__untar <conftest.tar])
+ AM_RUN_LOG([cat conftest.dir/file])
+ grep GrepMe conftest.dir/file >/dev/null 2>&1 && break
+ fi
+ done
rm -rf conftest.dir
- if test -s conftest.tar; then
- AM_RUN_LOG([$am__untar <conftest.tar])
- grep GrepMe conftest.dir/file >/dev/null 2>&1 && break
- fi
-done
-rm -rf conftest.dir
-AC_CACHE_VAL([am_cv_prog_tar_$1], [am_cv_prog_tar_$1=$_am_tool])
-AC_MSG_RESULT([$am_cv_prog_tar_$1])])
+ AC_CACHE_VAL([am_cv_prog_tar_$1], [am_cv_prog_tar_$1=$_am_tool])
+ AC_MSG_RESULT([$am_cv_prog_tar_$1])])
+
AC_SUBST([am__tar])
AC_SUBST([am__untar])
]) # _AM_PROG_TAR
diff --git a/lib/ffts/config.h.in b/lib/ffts/config.h.in
index 60ecb2f..6b3ca60 100644
--- a/lib/ffts/config.h.in
+++ b/lib/ffts/config.h.in
@@ -9,9 +9,6 @@
/* Define to FFT in single precision. */
#undef FFTS_PREC_SINGLE
-/* Set ARM float abi. */
-#undef FLOAT_ABI
-
/* Define to 1 if you have the declaration of `memalign', and to 0 if you
don't. */
#undef HAVE_DECL_MEMALIGN
@@ -146,3 +143,5 @@
/* Define to the type of an unsigned integer type of width exactly 64 bits if
such a type exists and the standard includes do not define it. */
#undef uint64_t
+
+// vim: set autoindent noexpandtab tabstop=3 shiftwidth=3:
diff --git a/lib/ffts/configure b/lib/ffts/configure
index ac1b7d0..2321b62 100755
--- a/lib/ffts/configure
+++ b/lib/ffts/configure
@@ -713,6 +713,10 @@ build_os
build_vendor
build_cpu
build
+AM_BACKSLASH
+AM_DEFAULT_VERBOSITY
+AM_DEFAULT_V
+AM_V
am__untar
am__tar
AMTAR
@@ -777,6 +781,7 @@ SHELL'
ac_subst_files=''
ac_user_opts='
enable_option_checking
+enable_silent_rules
enable_dependency_tracking
enable_shared
enable_static
@@ -1429,6 +1434,8 @@ Optional Features:
--disable-option-checking ignore unrecognized --enable/--with options
--disable-FEATURE do not include FEATURE (same as --enable-FEATURE=no)
--enable-FEATURE[=ARG] include FEATURE [ARG=yes]
+ --enable-silent-rules less verbose build output (undo: "make V=1")
+ --disable-silent-rules verbose build output (undo: "make V=0")
--enable-dependency-tracking
do not reject slow dependency extractors
--disable-dependency-tracking
@@ -2608,7 +2615,7 @@ ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $
ac_compiler_gnu=$ac_cv_c_compiler_gnu
-am__api_version='1.12'
+am__api_version='1.14'
ac_aux_dir=
for ac_dir in "$srcdir" "$srcdir/.." "$srcdir/../.."; do
@@ -2821,8 +2828,8 @@ if test x"${MISSING+set}" != xset; then
esac
fi
# Use eval to expand $SHELL
-if eval "$MISSING --run true"; then
- am_missing_run="$MISSING --run "
+if eval "$MISSING --is-lightweight"; then
+ am_missing_run="$MISSING "
else
am_missing_run=
{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: 'missing' script is too old or missing" >&5
@@ -3062,6 +3069,45 @@ else
fi
rmdir .tst 2>/dev/null
+# Check whether --enable-silent-rules was given.
+if test "${enable_silent_rules+set}" = set; then :
+ enableval=$enable_silent_rules;
+fi
+
+case $enable_silent_rules in # (((
+ yes) AM_DEFAULT_VERBOSITY=0;;
+ no) AM_DEFAULT_VERBOSITY=1;;
+ *) AM_DEFAULT_VERBOSITY=1;;
+esac
+am_make=${MAKE-make}
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $am_make supports nested variables" >&5
+$as_echo_n "checking whether $am_make supports nested variables... " >&6; }
+if ${am_cv_make_support_nested_variables+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ if $as_echo 'TRUE=$(BAR$(V))
+BAR0=false
+BAR1=true
+V=1
+am__doit:
+ @$(TRUE)
+.PHONY: am__doit' | $am_make -f - >/dev/null 2>&1; then
+ am_cv_make_support_nested_variables=yes
+else
+ am_cv_make_support_nested_variables=no
+fi
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $am_cv_make_support_nested_variables" >&5
+$as_echo "$am_cv_make_support_nested_variables" >&6; }
+if test $am_cv_make_support_nested_variables = yes; then
+ AM_V='$(V)'
+ AM_DEFAULT_V='$(AM_DEFAULT_VERBOSITY)'
+else
+ AM_V=$AM_DEFAULT_VERBOSITY
+ AM_DEFAULT_V=$AM_DEFAULT_VERBOSITY
+fi
+AM_BACKSLASH='\'
+
if test "`cd $srcdir && pwd`" != "`pwd`"; then
# Use -I$(srcdir) only when $(srcdir) != ., so that make's output
# is not polluted with repeated "-I."
@@ -3125,6 +3171,10 @@ mkdir_p='$(MKDIR_P)'
# in the wild :-( We should find a proper way to deprecate it ...
AMTAR='$${TAR-tar}'
+
+# We'll loop over all known methods to create a tar archive until one works.
+_am_tools='gnutar pax cpio none'
+
am__tar='$${TAR-tar} chof - "$$tardir"' am__untar='$${TAR-tar} xf -'
@@ -3132,6 +3182,48 @@ am__tar='$${TAR-tar} chof - "$$tardir"' am__untar='$${TAR-tar} xf -'
+# POSIX will say in a future version that running "rm -f" with no argument
+# is OK; and we want to be able to make that assumption in our Makefile
+# recipes. So use an aggressive probe to check that the usage we want is
+# actually supported "in the wild" to an acceptable degree.
+# See automake bug#10828.
+# To make any issue more visible, cause the running configure to be aborted
+# by default if the 'rm' program in use doesn't match our expectations; the
+# user can still override this though.
+if rm -f && rm -fr && rm -rf; then : OK; else
+ cat >&2 <<'END'
+Oops!
+
+Your 'rm' program seems unable to run without file operands specified
+on the command line, even when the '-f' option is present. This is contrary
+to the behaviour of most rm programs out there, and not conforming with
+the upcoming POSIX standard: <http://austingroupbugs.net/view.php?id=542>
+
+Please tell bug-automake@gnu.org about your system, including the value
+of your $PATH and any error possibly output before this message. This
+can help us improve future automake versions.
+
+END
+ if test x"$ACCEPT_INFERIOR_RM_PROGRAM" = x"yes"; then
+ echo 'Configuration will proceed anyway, since you have set the' >&2
+ echo 'ACCEPT_INFERIOR_RM_PROGRAM variable to "yes"' >&2
+ echo >&2
+ else
+ cat >&2 <<'END'
+Aborting the configuration process, to ensure you take notice of the issue.
+
+You can download and install GNU coreutils to get an 'rm' implementation
+that behaves properly: <http://www.gnu.org/software/coreutils/>.
+
+If you want to complete the configuration process using your problematic
+'rm' anyway, export the environment variable ACCEPT_INFERIOR_RM_PROGRAM
+to "yes", and re-run configure.
+
+END
+ as_fn_error $? "Your 'rm' program is bad, sorry." "$LINENO" 5
+ fi
+fi
+
# AC_CONFIG_SRCDIR([include/common.h])
@@ -4448,6 +4540,65 @@ ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
ac_compiler_gnu=$ac_cv_c_compiler_gnu
+ac_ext=c
+ac_cpp='$CPP $CPPFLAGS'
+ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_c_compiler_gnu
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CC understands -c and -o together" >&5
+$as_echo_n "checking whether $CC understands -c and -o together... " >&6; }
+if ${am_cv_prog_cc_c_o+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+_ACEOF
+ # Make sure it works both with $CC and with simple cc.
+ # Following AC_PROG_CC_C_O, we do the test twice because some
+ # compilers refuse to overwrite an existing .o file with -o,
+ # though they will create one.
+ am_cv_prog_cc_c_o=yes
+ for am_i in 1 2; do
+ if { echo "$as_me:$LINENO: $CC -c conftest.$ac_ext -o conftest2.$ac_objext" >&5
+ ($CC -c conftest.$ac_ext -o conftest2.$ac_objext) >&5 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } \
+ && test -f conftest2.$ac_objext; then
+ : OK
+ else
+ am_cv_prog_cc_c_o=no
+ break
+ fi
+ done
+ rm -f core conftest*
+ unset am_i
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $am_cv_prog_cc_c_o" >&5
+$as_echo "$am_cv_prog_cc_c_o" >&6; }
+if test "$am_cv_prog_cc_c_o" != yes; then
+ # Losing compiler, so override with the script.
+ # FIXME: It is wrong to rewrite CC.
+ # But if we don't then we get into trouble of one sort or another.
+ # A longer-term fix would be to have automake use am__CC in this case,
+ # and then we could set am__CC="\$(top_srcdir)/compile \$(CC)"
+ CC="$am_aux_dir/compile $CC"
+fi
+ac_ext=c
+ac_cpp='$CPP $CPPFLAGS'
+ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_c_compiler_gnu
+
+
depcc="$CC" am_compiler_list=
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking dependency style of $depcc" >&5
@@ -15533,9 +15684,6 @@ else
fi
-$as_echo "#define FLOAT_ABI \$float_abi" >>confdefs.h
-
-
# Check whether --enable-jni was given.
if test "${enable_jni+set}" = set; then :
enableval=$enable_jni; have_jni=$enableval
@@ -15747,7 +15895,7 @@ else
JAVA_TEST=Test.java
CLASS_TEST=Test.class
cat << \EOF > $JAVA_TEST
-/* #line 15750 "configure" */
+/* #line 15898 "configure" */
public class Test {
}
EOF
@@ -18377,7 +18525,7 @@ $as_echo "$as_me: executing $ac_file commands" >&6;}
case $ac_file$ac_mode in
"depfiles":C) test x"$AMDEP_TRUE" != x"" || {
- # Autoconf 2.62 quotes --file arguments for eval, but not when files
+ # Older Autoconf quotes --file arguments for eval, but not when files
# are listed without --file. Let's play safe and only enable the eval
# if we detect the quoting.
case $CONFIG_FILES in
@@ -18428,7 +18576,7 @@ $as_echo X"$mf" |
DEPDIR=`sed -n 's/^DEPDIR = //p' < "$mf"`
test -z "$DEPDIR" && continue
am__include=`sed -n 's/^am__include = //p' < "$mf"`
- test -z "am__include" && continue
+ test -z "$am__include" && continue
am__quote=`sed -n 's/^am__quote = //p' < "$mf"`
# Find all dependency output files, they are included files with
# $(DEPDIR) in their names. We invoke sed twice because it is the
diff --git a/lib/ffts/include/ffts.h b/lib/ffts/include/ffts.h
index a8750e0..cc85a88 100644
--- a/lib/ffts/include/ffts.h
+++ b/lib/ffts/include/ffts.h
@@ -1,7 +1,7 @@
/*
-
+
This file is part of FFTS.
-
+
Copyright (c) 2012, Anthony M. Blake
All rights reserved.
@@ -29,40 +29,82 @@
*/
-#ifndef __FFTS_H__
-#define __FFTS_H__
+#ifndef FFTS_H
+#define FFTS_H
+
+#if defined (_MSC_VER) && (_MSC_VER >= 1020)
+#pragma once
+#endif
-#include <stdio.h>
-#include <stdlib.h>
-#include <math.h>
-#include <stdint.h>
#include <stddef.h>
#ifdef __cplusplus
-extern "C"
-{
-#endif /* __cplusplus */
+extern "C" {
+#endif
+
+#if (defined(_WIN32) || defined(WIN32)) && defined(FFTS_SHARED)
+# ifdef FFTS_BUILD
+# define FFTS_API __declspec(dllexport)
+# else
+# define FFTS_API __declspec(dllimport)
+# endif
+#else
+# if (__GNUC__ >= 4) || defined(HAVE_GCC_VISIBILITY)
+# define FFTS_API __attribute__ ((visibility("default")))
+# else
+# define FFTS_API
+# endif
+#endif
+
+/* The direction of the transform
+   (i.e., the sign of the exponent in the transform).
+*/
+#define FFTS_FORWARD (-1)
+#define FFTS_BACKWARD (+1)
struct _ffts_plan_t;
typedef struct _ffts_plan_t ffts_plan_t;
-ffts_plan_t *ffts_init_1d(size_t N, int sign);
-ffts_plan_t *ffts_init_2d(size_t N1, size_t N2, int sign);
-ffts_plan_t *ffts_init_nd(int rank, size_t *Ns, int sign);
+/* Complex data is stored in the interleaved format
+   (i.e., the real and imaginary parts composing each
+   element of complex data are stored adjacently in memory).
+
+   The multi-dimensional arrays passed are expected to be
+   stored as a single contiguous block in row-major order.
+*/
+FFTS_API ffts_plan_t*
+ffts_init_1d(size_t N, int sign);
-// For real transforms, sign == -1 implies a real-to-complex forwards tranform,
-// and sign == 1 implies a complex-to-real backwards transform
-// The output of a real-to-complex transform is N/2+1 complex numbers, where the
-// redundant outputs have been omitted.
-ffts_plan_t *ffts_init_1d_real(size_t N, int sign);
-ffts_plan_t *ffts_init_2d_real(size_t N1, size_t N2, int sign);
-ffts_plan_t *ffts_init_nd_real(int rank, size_t *Ns, int sign);
+FFTS_API ffts_plan_t*
+ffts_init_2d(size_t N1, size_t N2, int sign);
-void ffts_execute(ffts_plan_t * , const void *input, void *output);
-void ffts_free(ffts_plan_t *);
+FFTS_API ffts_plan_t*
+ffts_init_nd(int rank, size_t *Ns, int sign);
-#ifdef __cplusplus
-} /* extern "C" */
-#endif /* __cplusplus */
+/* For real transforms, sign == FFTS_FORWARD implies a real-to-complex
+   forwards transform, and sign == FFTS_BACKWARD implies a complex-to-real
+ backwards transform.
+
+ The output of a real-to-complex transform is N/2+1 complex numbers,
+ where the redundant outputs have been omitted.
+*/
+FFTS_API ffts_plan_t*
+ffts_init_1d_real(size_t N, int sign);
+
+FFTS_API ffts_plan_t*
+ffts_init_2d_real(size_t N1, size_t N2, int sign);
+FFTS_API ffts_plan_t*
+ffts_init_nd_real(int rank, size_t *Ns, int sign);
+
+FFTS_API void
+ffts_execute(ffts_plan_t *p, const void *input, void *output);
+
+FFTS_API void
+ffts_free(ffts_plan_t *p);
+
+#ifdef __cplusplus
+}
#endif
+
+#endif /* FFTS_H */
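
A minimal usage sketch for the API declared above (not part of the patch). It
assumes the single-precision float data FFTS uses throughout, and 32-byte
aligned buffers obtained with posix_memalign, since the SIMD code paths
generally want aligned input; the exact alignment requirement is
platform-dependent.

    /* Usage sketch: assumptions as stated above, not a definitive recipe. */
    #define _POSIX_C_SOURCE 200112L /* for posix_memalign */
    #include <stdio.h>
    #include <stdlib.h>
    #include "ffts.h"

    int main(void)
    {
        size_t i, N = 16;
        float *in, *out;
        ffts_plan_t *p;

        /* Interleaved complex: 2 floats (re, im) per element. */
        if (posix_memalign((void **) &in, 32, 2 * N * sizeof(float)) ||
            posix_memalign((void **) &out, 32, 2 * N * sizeof(float)))
            return 1;

        for (i = 0; i < 2 * N; i++)
            in[i] = 0.0f;
        in[0] = 1.0f; /* unit impulse: its spectrum is flat */

        p = ffts_init_1d(N, FFTS_FORWARD);
        if (!p)
            return 1;

        ffts_execute(p, in, out);
        printf("bin 0: %f + %fi\n", out[0], out[1]);

        ffts_free(p);
        free(in);
        free(out);
        return 0;
    }

Note the asymmetric buffer sizes for the real transforms: a plan from
ffts_init_1d_real(N, FFTS_FORWARD) reads N real floats and writes N/2+1
complex values, i.e. (N/2+1)*2 floats.
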
diff --git a/lib/ffts/java/Makefile.in b/lib/ffts/java/Makefile.in
index 45d41c4..da274b2 100644
--- a/lib/ffts/java/Makefile.in
+++ b/lib/ffts/java/Makefile.in
@@ -1,7 +1,7 @@
-# Makefile.in generated by automake 1.12.4 from Makefile.am.
+# Makefile.in generated by automake 1.14 from Makefile.am.
# @configure_input@
-# Copyright (C) 1994-2012 Free Software Foundation, Inc.
+# Copyright (C) 1994-2013 Free Software Foundation, Inc.
# This Makefile.in is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
@@ -20,23 +20,51 @@
VPATH = @srcdir@
-am__make_dryrun = \
- { \
- am__dry=no; \
+am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)'
+am__make_running_with_option = \
+ case $${target_option-} in \
+ ?) ;; \
+ *) echo "am__make_running_with_option: internal error: invalid" \
+ "target option '$${target_option-}' specified" >&2; \
+ exit 1;; \
+ esac; \
+ has_opt=no; \
+ sane_makeflags=$$MAKEFLAGS; \
+ if $(am__is_gnu_make); then \
+ sane_makeflags=$$MFLAGS; \
+ else \
case $$MAKEFLAGS in \
*\\[\ \ ]*) \
- echo 'am--echo: ; @echo "AM" OK' | $(MAKE) -f - 2>/dev/null \
- | grep '^AM OK$$' >/dev/null || am__dry=yes;; \
- *) \
- for am__flg in $$MAKEFLAGS; do \
- case $$am__flg in \
- *=*|--*) ;; \
- *n*) am__dry=yes; break;; \
- esac; \
- done;; \
+ bs=\\; \
+ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
+ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
esac; \
- test $$am__dry = yes; \
- }
+ fi; \
+ skip_next=no; \
+ strip_trailopt () \
+ { \
+ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
+ }; \
+ for flg in $$sane_makeflags; do \
+ test $$skip_next = yes && { skip_next=no; continue; }; \
+ case $$flg in \
+ *=*|--*) continue;; \
+ -*I) strip_trailopt 'I'; skip_next=yes;; \
+ -*I?*) strip_trailopt 'I';; \
+ -*O) strip_trailopt 'O'; skip_next=yes;; \
+ -*O?*) strip_trailopt 'O';; \
+ -*l) strip_trailopt 'l'; skip_next=yes;; \
+ -*l?*) strip_trailopt 'l';; \
+ -[dEDm]) skip_next=yes;; \
+ -[JT]) skip_next=yes;; \
+ esac; \
+ case $$flg in \
+ *$$target_option*) has_opt=yes; break;; \
+ esac; \
+ done; \
+ test $$has_opt = yes
+am__make_dryrun = (target_option=n; $(am__make_running_with_option))
+am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
pkgdatadir = $(datadir)/@PACKAGE@
pkgincludedir = $(includedir)/@PACKAGE@
pkglibdir = $(libdir)/@PACKAGE@
@@ -56,7 +84,7 @@ POST_UNINSTALL = :
build_triplet = @build@
host_triplet = @host@
subdir = java
-DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in \
+DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \
$(top_srcdir)/depcomp
ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
am__aclocal_m4_deps = $(top_srcdir)/m4/ax_check_classpath.m4 \
@@ -109,23 +137,49 @@ am__libffts_jni_la_SOURCES_DIST = jni/ffts_jni.c
@ENABLE_JNI_TRUE@am_libffts_jni_la_OBJECTS = \
@ENABLE_JNI_TRUE@ libffts_jni_la-ffts_jni.lo
libffts_jni_la_OBJECTS = $(am_libffts_jni_la_OBJECTS)
-libffts_jni_la_LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) \
- $(LIBTOOLFLAGS) --mode=link $(CCLD) $(libffts_jni_la_CFLAGS) \
- $(CFLAGS) $(libffts_jni_la_LDFLAGS) $(LDFLAGS) -o $@
+AM_V_lt = $(am__v_lt_@AM_V@)
+am__v_lt_ = $(am__v_lt_@AM_DEFAULT_V@)
+am__v_lt_0 = --silent
+am__v_lt_1 =
+libffts_jni_la_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CC \
+ $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CCLD) \
+ $(libffts_jni_la_CFLAGS) $(CFLAGS) $(libffts_jni_la_LDFLAGS) \
+ $(LDFLAGS) -o $@
@ENABLE_JNI_TRUE@am_libffts_jni_la_rpath = -rpath $(libdir)
+AM_V_P = $(am__v_P_@AM_V@)
+am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
+am__v_P_0 = false
+am__v_P_1 = :
+AM_V_GEN = $(am__v_GEN_@AM_V@)
+am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
+am__v_GEN_0 = @echo " GEN " $@;
+am__v_GEN_1 =
+AM_V_at = $(am__v_at_@AM_V@)
+am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
+am__v_at_0 = @
+am__v_at_1 =
DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir)
depcomp = $(SHELL) $(top_srcdir)/depcomp
am__depfiles_maybe = depfiles
am__mv = mv -f
COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \
$(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS)
-LTCOMPILE = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \
- --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \
- $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS)
+LTCOMPILE = $(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) \
+ $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) \
+ $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) \
+ $(AM_CFLAGS) $(CFLAGS)
+AM_V_CC = $(am__v_CC_@AM_V@)
+am__v_CC_ = $(am__v_CC_@AM_DEFAULT_V@)
+am__v_CC_0 = @echo " CC " $@;
+am__v_CC_1 =
CCLD = $(CC)
-LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \
- --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) \
- $(LDFLAGS) -o $@
+LINK = $(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) \
+ $(LIBTOOLFLAGS) --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) \
+ $(AM_LDFLAGS) $(LDFLAGS) -o $@
+AM_V_CCLD = $(am__v_CCLD_@AM_V@)
+am__v_CCLD_ = $(am__v_CCLD_@AM_DEFAULT_V@)
+am__v_CCLD_0 = @echo " CCLD " $@;
+am__v_CCLD_1 =
SOURCES = $(libffts_jni_la_SOURCES)
DIST_SOURCES = $(am__libffts_jni_la_SOURCES_DIST)
am__can_run_installinfo = \
@@ -135,11 +189,29 @@ am__can_run_installinfo = \
esac
DATA = $(pkgdata_DATA)
HEADERS = $(nodist_include_HEADERS)
+am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
+# Read a list of newline-separated strings from the standard input,
+# and print each of them once, without duplicates. Input order is
+# *not* preserved.
+am__uniquify_input = $(AWK) '\
+ BEGIN { nonempty = 0; } \
+ { items[$$0] = 1; nonempty = 1; } \
+ END { if (nonempty) { for (i in items) print i; }; } \
+'
+# Make sure the list of sources is unique. This is necessary because,
+# e.g., the same source file might be shared among _SOURCES variables
+# for different programs/libraries.
+am__define_uniq_tagged_files = \
+ list='$(am__tagged_files)'; \
+ unique=`for i in $$list; do \
+ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+ done | $(am__uniquify_input)`
ETAGS = etags
CTAGS = ctags
DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
ACLOCAL = @ACLOCAL@
AMTAR = @AMTAR@
+AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
AR = @AR@
AUTOCONF = @AUTOCONF@
AUTOHEADER = @AUTOHEADER@
@@ -313,6 +385,7 @@ $(top_srcdir)/configure: $(am__configure_deps)
$(ACLOCAL_M4): $(am__aclocal_m4_deps)
cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
$(am__aclocal_m4_deps):
+
install-libLTLIBRARIES: $(lib_LTLIBRARIES)
@$(NORMAL_INSTALL)
@list='$(lib_LTLIBRARIES)'; test -n "$(libdir)" || list=; \
@@ -347,8 +420,9 @@ clean-libLTLIBRARIES:
echo rm -f $${locs}; \
rm -f $${locs}; \
}
+
libffts_jni.la: $(libffts_jni_la_OBJECTS) $(libffts_jni_la_DEPENDENCIES) $(EXTRA_libffts_jni_la_DEPENDENCIES)
- $(libffts_jni_la_LINK) $(am_libffts_jni_la_rpath) $(libffts_jni_la_OBJECTS) $(libffts_jni_la_LIBADD) $(LIBS)
+ $(AM_V_CCLD)$(libffts_jni_la_LINK) $(am_libffts_jni_la_rpath) $(libffts_jni_la_OBJECTS) $(libffts_jni_la_LIBADD) $(LIBS)
mostlyclean-compile:
-rm -f *.$(OBJEXT)
@@ -359,32 +433,32 @@ distclean-compile:
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libffts_jni_la-ffts_jni.Plo@am__quote@
.c.o:
-@am__fastdepCC_TRUE@ $(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $<
-@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po
-@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
+@am__fastdepCC_TRUE@ $(AM_V_CC)$(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $<
+@am__fastdepCC_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ $(AM_V_CC)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCC_FALSE@ $(COMPILE) -c $<
+@am__fastdepCC_FALSE@ $(AM_V_CC@am__nodep@)$(COMPILE) -c -o $@ $<
.c.obj:
-@am__fastdepCC_TRUE@ $(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'`
-@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po
-@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
+@am__fastdepCC_TRUE@ $(AM_V_CC)$(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'`
+@am__fastdepCC_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ $(AM_V_CC)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCC_FALSE@ $(COMPILE) -c `$(CYGPATH_W) '$<'`
+@am__fastdepCC_FALSE@ $(AM_V_CC@am__nodep@)$(COMPILE) -c -o $@ `$(CYGPATH_W) '$<'`
.c.lo:
-@am__fastdepCC_TRUE@ $(LTCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $<
-@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo
-@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@
+@am__fastdepCC_TRUE@ $(AM_V_CC)$(LTCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $<
+@am__fastdepCC_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ $(AM_V_CC)source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@
@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCC_FALSE@ $(LTCOMPILE) -c -o $@ $<
+@am__fastdepCC_FALSE@ $(AM_V_CC@am__nodep@)$(LTCOMPILE) -c -o $@ $<
libffts_jni_la-ffts_jni.lo: jni/ffts_jni.c
-@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libffts_jni_la_CFLAGS) $(CFLAGS) -MT libffts_jni_la-ffts_jni.lo -MD -MP -MF $(DEPDIR)/libffts_jni_la-ffts_jni.Tpo -c -o libffts_jni_la-ffts_jni.lo `test -f 'jni/ffts_jni.c' || echo '$(srcdir)/'`jni/ffts_jni.c
-@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/libffts_jni_la-ffts_jni.Tpo $(DEPDIR)/libffts_jni_la-ffts_jni.Plo
-@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='jni/ffts_jni.c' object='libffts_jni_la-ffts_jni.lo' libtool=yes @AMDEPBACKSLASH@
+@am__fastdepCC_TRUE@ $(AM_V_CC)$(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libffts_jni_la_CFLAGS) $(CFLAGS) -MT libffts_jni_la-ffts_jni.lo -MD -MP -MF $(DEPDIR)/libffts_jni_la-ffts_jni.Tpo -c -o libffts_jni_la-ffts_jni.lo `test -f 'jni/ffts_jni.c' || echo '$(srcdir)/'`jni/ffts_jni.c
+@am__fastdepCC_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libffts_jni_la-ffts_jni.Tpo $(DEPDIR)/libffts_jni_la-ffts_jni.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ $(AM_V_CC)source='jni/ffts_jni.c' object='libffts_jni_la-ffts_jni.lo' libtool=yes @AMDEPBACKSLASH@
@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libffts_jni_la_CFLAGS) $(CFLAGS) -c -o libffts_jni_la-ffts_jni.lo `test -f 'jni/ffts_jni.c' || echo '$(srcdir)/'`jni/ffts_jni.c
+@am__fastdepCC_FALSE@ $(AM_V_CC@am__nodep@)$(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libffts_jni_la_CFLAGS) $(CFLAGS) -c -o libffts_jni_la-ffts_jni.lo `test -f 'jni/ffts_jni.c' || echo '$(srcdir)/'`jni/ffts_jni.c
mostlyclean-libtool:
-rm -f *.lo
@@ -434,26 +508,15 @@ uninstall-nodist_includeHEADERS:
files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
dir='$(DESTDIR)$(includedir)'; $(am__uninstall_files_from_dir)
-ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES)
- list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \
- unique=`for i in $$list; do \
- if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
- done | \
- $(AWK) '{ files[$$0] = 1; nonempty = 1; } \
- END { if (nonempty) { for (i in files) print i; }; }'`; \
- mkid -fID $$unique
-tags: TAGS
-
-TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \
- $(TAGS_FILES) $(LISP)
+ID: $(am__tagged_files)
+ $(am__define_uniq_tagged_files); mkid -fID $$unique
+tags: tags-am
+TAGS: tags
+
+tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files)
set x; \
here=`pwd`; \
- list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \
- unique=`for i in $$list; do \
- if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
- done | \
- $(AWK) '{ files[$$0] = 1; nonempty = 1; } \
- END { if (nonempty) { for (i in files) print i; }; }'`; \
+ $(am__define_uniq_tagged_files); \
shift; \
if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \
test -n "$$unique" || unique=$$empty_fix; \
@@ -465,15 +528,11 @@ TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \
$$unique; \
fi; \
fi
-ctags: CTAGS
-CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \
- $(TAGS_FILES) $(LISP)
- list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \
- unique=`for i in $$list; do \
- if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
- done | \
- $(AWK) '{ files[$$0] = 1; nonempty = 1; } \
- END { if (nonempty) { for (i in files) print i; }; }'`; \
+ctags: ctags-am
+
+CTAGS: ctags
+ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files)
+ $(am__define_uniq_tagged_files); \
test -z "$(CTAGS_ARGS)$$unique" \
|| $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \
$$unique
@@ -482,9 +541,10 @@ GTAGS:
here=`$(am__cd) $(top_builddir) && pwd` \
&& $(am__cd) $(top_srcdir) \
&& gtags -i $(GTAGS_ARGS) "$$here"
+cscopelist: cscopelist-am
-cscopelist: $(HEADERS) $(SOURCES) $(LISP)
- list='$(SOURCES) $(HEADERS) $(LISP)'; \
+cscopelist-am: $(am__tagged_files)
+ list='$(am__tagged_files)'; \
case "$(srcdir)" in \
[\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \
*) sdir=$(subdir)/$(srcdir) ;; \
@@ -646,9 +706,9 @@ uninstall-am: uninstall-libLTLIBRARIES uninstall-nodist_includeHEADERS \
.MAKE: all check install install-am install-strip
-.PHONY: CTAGS GTAGS all all-am check check-am clean clean-generic \
- clean-libLTLIBRARIES clean-libtool clean-local cscopelist \
- ctags distclean distclean-compile distclean-generic \
+.PHONY: CTAGS GTAGS TAGS all all-am check check-am clean clean-generic \
+ clean-libLTLIBRARIES clean-libtool clean-local cscopelist-am \
+ ctags ctags-am distclean distclean-compile distclean-generic \
distclean-libtool distclean-tags distdir dvi dvi-am html \
html-am info info-am install install-am install-data \
install-data-am install-dvi install-dvi-am install-exec \
@@ -659,7 +719,7 @@ uninstall-am: uninstall-libLTLIBRARIES uninstall-nodist_includeHEADERS \
installcheck installcheck-am installdirs maintainer-clean \
maintainer-clean-generic mostlyclean mostlyclean-compile \
mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \
- tags uninstall uninstall-am uninstall-libLTLIBRARIES \
+ tags tags-am uninstall uninstall-am uninstall-libLTLIBRARIES \
uninstall-nodist_includeHEADERS uninstall-pkgdataDATA
diff --git a/lib/ffts/java/android/.classpath b/lib/ffts/java/android/.classpath
new file mode 100644
index 0000000..46f15fa
--- /dev/null
+++ b/lib/ffts/java/android/.classpath
@@ -0,0 +1,9 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<classpath>
+ <classpathentry kind="src" path="gen"/>
+ <classpathentry kind="src" path="src"/>
+ <classpathentry kind="con" path="com.android.ide.eclipse.adt.ANDROID_FRAMEWORK"/>
+ <classpathentry exported="true" kind="con" path="com.android.ide.eclipse.adt.LIBRARIES"/>
+ <classpathentry exported="true" kind="con" path="com.android.ide.eclipse.adt.DEPENDENCIES"/>
+ <classpathentry kind="output" path="bin/classes"/>
+</classpath>
diff --git a/lib/ffts/java/android/.project b/lib/ffts/java/android/.project
new file mode 100644
index 0000000..aff6a0c
--- /dev/null
+++ b/lib/ffts/java/android/.project
@@ -0,0 +1,40 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<projectDescription>
+ <name>ffts-android</name>
+ <comment></comment>
+ <projects>
+ </projects>
+ <buildSpec>
+ <buildCommand>
+ <name>com.android.ide.eclipse.adt.ResourceManagerBuilder</name>
+ <arguments>
+ </arguments>
+ </buildCommand>
+ <buildCommand>
+ <name>com.android.ide.eclipse.adt.PreCompilerBuilder</name>
+ <arguments>
+ </arguments>
+ </buildCommand>
+ <buildCommand>
+ <name>org.eclipse.jdt.core.javabuilder</name>
+ <arguments>
+ </arguments>
+ </buildCommand>
+ <buildCommand>
+ <name>com.android.ide.eclipse.adt.ApkBuilder</name>
+ <arguments>
+ </arguments>
+ </buildCommand>
+ </buildSpec>
+ <natures>
+ <nature>com.android.ide.eclipse.adt.AndroidNature</nature>
+ <nature>org.eclipse.jdt.core.javanature</nature>
+ </natures>
+ <linkedResources>
+ <link>
+ <name>src</name>
+ <type>2</type>
+ <locationURI>PARENT-1-PROJECT_LOC/src</locationURI>
+ </link>
+ </linkedResources>
+</projectDescription>
diff --git a/lib/ffts/java/android/.settings/org.eclipse.jdt.core.prefs b/lib/ffts/java/android/.settings/org.eclipse.jdt.core.prefs
new file mode 100644
index 0000000..b080d2d
--- /dev/null
+++ b/lib/ffts/java/android/.settings/org.eclipse.jdt.core.prefs
@@ -0,0 +1,4 @@
+eclipse.preferences.version=1
+org.eclipse.jdt.core.compiler.codegen.targetPlatform=1.6
+org.eclipse.jdt.core.compiler.compliance=1.6
+org.eclipse.jdt.core.compiler.source=1.6
diff --git a/lib/ffts/java/android/.settings/org.eclipse.ltk.core.refactoring.prefs b/lib/ffts/java/android/.settings/org.eclipse.ltk.core.refactoring.prefs
new file mode 100644
index 0000000..b196c64
--- /dev/null
+++ b/lib/ffts/java/android/.settings/org.eclipse.ltk.core.refactoring.prefs
@@ -0,0 +1,2 @@
+eclipse.preferences.version=1
+org.eclipse.ltk.core.refactoring.enable.project.refactoring.history=false
diff --git a/lib/ffts/java/android/AndroidManifest.xml b/lib/ffts/java/android/AndroidManifest.xml
new file mode 100644
index 0000000..6475e8e
--- /dev/null
+++ b/lib/ffts/java/android/AndroidManifest.xml
@@ -0,0 +1,7 @@
+<?xml version="1.0" encoding="utf-8"?>
+<manifest xmlns:android="http://schemas.android.com/apk/res/android"
+ package="nz.waikato.ffts"
+ android:versionCode="1"
+ android:versionName="1.0">
+ <uses-sdk android:minSdkVersion="8" />
+</manifest>
diff --git a/lib/ffts/java/android/ant.properties b/lib/ffts/java/android/ant.properties
new file mode 100644
index 0000000..95346d2
--- /dev/null
+++ b/lib/ffts/java/android/ant.properties
@@ -0,0 +1,18 @@
+# This file is used to override default values used by the Ant build system.
+#
+# This file must be checked into Version Control Systems, as it is
+# integral to the build system of your project.
+
+# This file is only used by the Ant script.
+
+# You can use this to override default values such as
+# 'source.dir' for the location of your java source folder and
+# 'out.dir' for the location of your output folder.
+source.dir=../src
+
+# You can also use it to define how the release builds are signed by declaring
+# the following properties:
+# 'key.store' for the location of your keystore and
+# 'key.alias' for the name of the key to use.
+# The password will be asked during the build when you use the 'release' target.
+
diff --git a/lib/ffts/java/android/build.xml b/lib/ffts/java/android/build.xml
new file mode 100644
index 0000000..4d6cc3d
--- /dev/null
+++ b/lib/ffts/java/android/build.xml
@@ -0,0 +1,92 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project name="ffts" default="help">
+
+ <!-- The local.properties file is created and updated by the 'android' tool.
+ It contains the path to the SDK. It should *NOT* be checked into
+ Version Control Systems. -->
+ <property file="local.properties" />
+
+ <!-- The ant.properties file can be created by you. It is only edited by the
+ 'android' tool to add properties to it.
+ This is the place to change some Ant specific build properties.
+ Here are some properties you may want to change/update:
+
+ source.dir
+ The name of the source directory. Default is 'src'.
+ out.dir
+ The name of the output directory. Default is 'bin'.
+
+ For other overridable properties, look at the beginning of the rules
+ files in the SDK, at tools/ant/build.xml
+
+ Properties related to the SDK location or the project target should
+ be updated using the 'android' tool with the 'update' action.
+
+ This file is an integral part of the build system for your
+ application and should be checked into Version Control Systems.
+
+ -->
+ <property file="ant.properties" />
+
+    <!-- if sdk.dir was not set from one of the property files, then
+ get it from the ANDROID_HOME env var.
+ This must be done before we load project.properties since
+ the proguard config can use sdk.dir -->
+ <property environment="env" />
+ <condition property="sdk.dir" value="${env.ANDROID_HOME}">
+ <isset property="env.ANDROID_HOME" />
+ </condition>
+
+ <!-- The project.properties file is created and updated by the 'android'
+ tool, as well as ADT.
+
+ This contains project specific properties such as project target, and library
+ dependencies. Lower level build properties are stored in ant.properties
+ (or in .classpath for Eclipse projects).
+
+ This file is an integral part of the build system for your
+ application and should be checked into Version Control Systems. -->
+ <loadproperties srcFile="project.properties" />
+
+ <!-- quick check on sdk.dir -->
+ <fail
+ message="sdk.dir is missing. Make sure to generate local.properties using 'android update project' or to inject it through the ANDROID_HOME environment variable."
+ unless="sdk.dir"
+ />
+
+ <!--
+ Import per project custom build rules if present at the root of the project.
+ This is the place to put custom intermediary targets such as:
+ -pre-build
+ -pre-compile
+ -post-compile (This is typically used for code obfuscation.
+ Compiled code location: ${out.classes.absolute.dir}
+ If this is not done in place, override ${out.dex.input.absolute.dir})
+ -post-package
+ -post-build
+ -pre-clean
+ -->
+ <import file="custom_rules.xml" optional="true" />
+
+ <!-- Import the actual build file.
+
+ To customize existing targets, there are two options:
+ - Customize only one target:
+ - copy/paste the target into this file, *before* the
+ <import> task.
+ - customize it to your needs.
+ - Customize the whole content of build.xml
+ - copy/paste the content of the rules files (minus the top node)
+ into this file, replacing the <import> task.
+ - customize to your needs.
+
+ ***********************
+ ****** IMPORTANT ******
+ ***********************
+ In all cases you must update the value of version-tag below to read 'custom' instead of an integer,
+ in order to avoid having your file be overridden by tools such as "android update project"
+ -->
+ <!-- version-tag: 1 -->
+ <import file="${sdk.dir}/tools/ant/build.xml" />
+
+</project>
diff --git a/lib/ffts/java/android/jni/Android.mk b/lib/ffts/java/android/jni/Android.mk
new file mode 100644
index 0000000..879ef4d
--- /dev/null
+++ b/lib/ffts/java/android/jni/Android.mk
@@ -0,0 +1,25 @@
+LOCAL_PATH := $(call my-dir)
+
+TOP=../../..
+
+# Include the shared library
+#include $(CLEAR_VARS)
+#LOCAL_MODULE := ffts
+#LOCAL_SRC_FILES := ../../../src/.libs/libffts.so
+#include $(PREBUILT_SHARED_LIBRARY)
+
+# Include the static library in shared lib
+include $(CLEAR_VARS)
+LOCAL_MODULE := ffts
+LOCAL_SRC_FILES := $(TOP)/java/android/bin/lib/libffts.a
+LOCAL_EXPORT_C_INCLUDES := $(TOP)/include
+include $(PREBUILT_STATIC_LIBRARY)
+
+include $(CLEAR_VARS)
+LOCAL_MODULE := ffts_jni
+LOCAL_CFLAGS := -I$(TOP)/include -I$(TOP)/java/jni -I$(TOP) -Wno-pointer-to-int-cast -Wno-int-to-pointer-cast
+LOCAL_SRC_FILES := $(TOP)/java/jni/ffts_jni.c
+LOCAL_LDLIBS := -L$(SYSROOT)/usr/lib -llog
+LOCAL_STATIC_LIBRARIES := ffts
+
+include $(BUILD_SHARED_LIBRARY)
diff --git a/lib/ffts/java/android/jni/Application.mk b/lib/ffts/java/android/jni/Application.mk
new file mode 100644
index 0000000..12c9df6
--- /dev/null
+++ b/lib/ffts/java/android/jni/Application.mk
@@ -0,0 +1,2 @@
+# requires NEON atm
+APP_ABI := armeabi-v7a
diff --git a/lib/ffts/java/android/proguard-project.txt b/lib/ffts/java/android/proguard-project.txt
new file mode 100644
index 0000000..f2fe155
--- /dev/null
+++ b/lib/ffts/java/android/proguard-project.txt
@@ -0,0 +1,20 @@
+# To enable ProGuard in your project, edit project.properties
+# to define the proguard.config property as described in that file.
+#
+# Add project specific ProGuard rules here.
+# By default, the flags in this file are appended to flags specified
+# in ${sdk.dir}/tools/proguard/proguard-android.txt
+# You can edit the include path and order by changing the ProGuard
+# include property in project.properties.
+#
+# For more details, see
+# http://developer.android.com/guide/developing/tools/proguard.html
+
+# Add any project specific keep options here:
+
+# If your project uses WebView with JS, uncomment the following
+# and specify the fully qualified class name to the JavaScript interface
+# class:
+#-keepclassmembers class fqcn.of.javascript.interface.for.webview {
+# public *;
+#}
diff --git a/lib/ffts/java/android/project.properties b/lib/ffts/java/android/project.properties
new file mode 100644
index 0000000..88ca83f
--- /dev/null
+++ b/lib/ffts/java/android/project.properties
@@ -0,0 +1,15 @@
+# This file is automatically generated by Android Tools.
+# Do not modify this file -- YOUR CHANGES WILL BE ERASED!
+#
+# This file must be checked in Version Control Systems.
+#
+# To customize properties used by the Ant build system edit
+# "ant.properties", and override values to adapt the script to your
+# project structure.
+#
+# To enable ProGuard to shrink and obfuscate your code, uncomment this (available properties: sdk.dir, user.home):
+#proguard.config=${sdk.dir}/tools/proguard/proguard-android.txt:proguard-project.txt
+
+android.library=true
+# Project target.
+target=android-10
diff --git a/lib/ffts/java/jni/ffts_jni.c b/lib/ffts/java/jni/ffts_jni.c
index 41fdf76..d044f95 100644
--- a/lib/ffts/java/jni/ffts_jni.c
+++ b/lib/ffts/java/jni/ffts_jni.c
@@ -38,6 +38,8 @@
// the classes ... but we can't build the project without the jni.
#ifdef ANDROID
#include <jni.h>
+#define NEEDS_ALIGNED
+#undef HAVE_DECL_POSIX_MEMALIGN
#else
#include "nz_ac_waikato_ffts_FFTS.h"
#endif
@@ -231,3 +233,5 @@ JNIEXPORT void JNICALL Java_nz_ac_waikato_ffts_FFTS_free
ffts_free(plan);
}
+
+// vim: set autoindent noexpandtab tabstop=3 shiftwidth=3:
diff --git a/lib/ffts/java/src/nz/ac/waikato/ffts/FFTS.java b/lib/ffts/java/src/nz/ac/waikato/ffts/FFTS.java
new file mode 100644
index 0000000..5b6771c
--- /dev/null
+++ b/lib/ffts/java/src/nz/ac/waikato/ffts/FFTS.java
@@ -0,0 +1,203 @@
+/*
+ * This file is part of FFTS -- The Fastest Fourier Transform in the South
+ *
+ * Copyright (c) 2013, Michael Zucchi <notzed@gmail.com>
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the organization nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL ANTHONY M. BLAKE BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package nz.ac.waikato.ffts;
+
+import java.nio.FloatBuffer;
+
+/**
+ * A Java wrapper for FFTS plans.
+ *
+ * Plans must currently be freed explicitly.
+ *
+ * @author notzed
+ */
+public class FFTS {
+
+ /**
+ * C pointer
+ */
+ private long p;
+ /**
+ * Minimum size of input
+ */
+ final protected long inSize;
+ /**
+ * Minimum size of output
+ */
+ final protected long outSize;
+
+ private FFTS(long p, long inSize) {
+ this(p, inSize, inSize);
+ }
+
+ private FFTS(long p, long inSize, long outSize) {
+ this.p = p;
+ this.inSize = inSize;
+ this.outSize = outSize;
+ }
+ /**
+ * The sign to use for a forward transform.
+ */
+ public static final int FORWARD = -1;
+ /**
+ * The sign to use for a backward transform.
+ */
+ public static final int BACKWARD = 1;
+
+ /**
+ * Create a FFT plan for a 1-dimensional complex transform.
+ *
+ * The src and dst parameters to execute() use complex data.
+ *
+ * @param sign The direction of the transform.
+ * @param N The size of the transform.
+ * @return the constructed plan.
+ */
+ public static FFTS complex(int sign, int N) {
+ return new FFTS(complex_1d(N, sign), N * 2);
+ }
+
+ /**
+ * Create a FFT plan for a 2-dimensional complex transform.
+ * @param sign The direction of the transform.
+ * @param N1 The size of the first dimension of the transform.
+ * @param N2 The size of the second dimension of the transform.
+ * @return the constructed plan.
+ */
+ public static FFTS complex(int sign, int N1, int N2) {
+ return new FFTS(complex_2d(N1, N2, sign), N1 * N2 * 2);
+ }
+
+ public static FFTS complex(int sign, int... Ns) {
+ return new FFTS(complex_nd(Ns, sign), size(Ns) * 2);
+ }
+
+ public static FFTS real(int sign, int N) {
+ return new FFTS(real_1d(N, sign), sign == FORWARD ? N : (N / 2 + 1) * 2, sign == FORWARD ? (N / 2 + 1) * 2 : N);
+ }
+
+ public static FFTS real(int sign, int N1, int N2) {
+ return new FFTS(real_2d(N1, N2, sign), sign == FORWARD ? N1 * N2 : (N1 * N2 / 2 + 1) * 2, sign == FORWARD ? (N1 * N2 / 2 + 1) * 2 : N1 * N2);
+ }
+
+ public static FFTS real(int sign, int... Ns) {
+ return new FFTS(real_nd(Ns, sign), sign == FORWARD ? size(Ns) : (size(Ns) / 2 + 1) * 2, sign == FORWARD ? (size(Ns) / 2 + 1) * 2 : size(Ns));
+ }
+
+ /**
+ * Execute this plan with the given array data.
+ *
+ * @param src
+ * @param dst
+ */
+ public void execute(float[] src, float[] dst) {
+ execute(src, 0, dst, 0);
+ }
+
+ /**
+ * Execute this plan with the given array data.
+ * @param src
+ * @param soff Start offset into src array.
+ * @param dst
+ * @param doff Start offset into dst array.
+ */
+ public void execute(float[] src, int soff, float[] dst, int doff) {
+ if (src.length - soff < inSize || dst.length - doff < outSize)
+ throw new ArrayIndexOutOfBoundsException();
+ if (p == 0)
+ throw new NullPointerException();
+
+ execute(p, inSize, src, soff, dst, doff);
+ }
+
+ /**
+ * Execute this plan with the given nio buffers. The buffers
+ * must be derived from direct buffers.
+ *
+ * The buffer position and limits are ignored.
+ *
+ * @param src
+ * @param dst
+ */
+ public void execute(FloatBuffer src, FloatBuffer dst) {
+ if (src.capacity() < inSize || dst.capacity() < outSize)
+ throw new ArrayIndexOutOfBoundsException();
+ if (p == 0)
+ throw new NullPointerException();
+
+ execute(p, inSize, src, dst);
+ }
+
+ /**
+ * Free the plan.
+ */
+ public void free() {
+ if (p == 0)
+ throw new NullPointerException();
+ free(p);
+ }
+
+ /*
+ * Calculate the number of elements required to store one
+ * set of n-dimensional data.
+ */
+ protected static long size(int[] Ns) {
+ long s = Ns[0];
+ for (int i = 1; i < Ns.length; i++)
+ s *= Ns[i];
+ return s;
+ }
+
+ static {
+ System.loadLibrary("ffts_jni");
+ }
+
+ /*
+ * Native interface
+ */
+ protected static native long complex_1d(int N, int sign);
+
+ protected static native long complex_2d(int N1, int N2, int sign);
+
+ protected static native long complex_nd(int[] Ns, int sign);
+
+ protected static native long real_1d(int N, int sign);
+
+ protected static native long real_2d(int N1, int N2, int sign);
+
+ protected static native long real_nd(int[] Ns, int sign);
+
+ protected static native void execute(long p, long size, float[] src, int soff, float[] dst, int doff);
+
+ protected static native void execute(long p, long size, FloatBuffer src, FloatBuffer dst);
+
+ protected static native void free(long p);
+}
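
A matching sketch on the Java side, assuming libffts_jni is available on
java.library.path (the static initializer above calls
System.loadLibrary("ffts_jni")); FFTSDemo is a hypothetical caller, not part
of the patch:

    import nz.ac.waikato.ffts.FFTS;

    public class FFTSDemo {
        public static void main(String[] args) {
            int n = 16;
            // Forward 1-d complex plan; complex() expects interleaved data.
            FFTS plan = FFTS.complex(FFTS.FORWARD, n);

            // Interleaved complex: 2 floats (re, im) per element.
            float[] src = new float[n * 2];
            float[] dst = new float[n * 2];
            src[0] = 1.0f; // unit impulse: its spectrum is flat

            plan.execute(src, dst);
            System.out.printf("bin 0: %f + %fi%n", dst[0], dst[1]);

            // Plans must currently be freed explicitly.
            plan.free();
        }
    }
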
diff --git a/lib/ffts/m4/ax_check_class.m4 b/lib/ffts/m4/ax_check_class.m4
new file mode 100644
index 0000000..098aa77
--- /dev/null
+++ b/lib/ffts/m4/ax_check_class.m4
@@ -0,0 +1,144 @@
+# ===========================================================================
+# http://www.gnu.org/software/autoconf-archive/ax_check_class.html
+# ===========================================================================
+#
+# SYNOPSIS
+#
+# AX_CHECK_CLASS
+#
+# DESCRIPTION
+#
+# AX_CHECK_CLASS tests the existence of a given Java class, either in a
+# jar or in a '.class' file.
+#
+# *Warning*: its success or failure can depend on a proper setting of the
+# CLASSPATH env. variable.
+#
+# Note: This is part of the set of autoconf M4 macros for Java programs.
+# It is VERY IMPORTANT that you download the whole set, since some macros
+# depend on others. Unfortunately, the autoconf archive does not support
+# the concept of a set of macros, so I had to break it up for submission. The
+# general documentation, as well as the sample configure.in, is included
+# in the AX_PROG_JAVA macro.
+#
+# LICENSE
+#
+# Copyright (c) 2008 Stephane Bortzmeyer <bortzmeyer@pasteur.fr>
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 2 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+# As a special exception, the respective Autoconf Macro's copyright owner
+# gives unlimited permission to copy, distribute and modify the configure
+# scripts that are the output of Autoconf when processing the Macro. You
+# need not follow the terms of the GNU General Public License when using
+# or distributing such scripts, even though portions of the text of the
+# Macro appear in them. The GNU General Public License (GPL) does govern
+# all other use of the material that constitutes the Autoconf Macro.
+#
+# This special exception to the GPL applies to versions of the Autoconf
+# Macro released by the Autoconf Archive. When you make and distribute a
+# modified version of the Autoconf Macro, you may extend this special
+# exception to the GPL to apply to your modified version as well.
+
+#serial 7
+
+AU_ALIAS([AC_CHECK_CLASS], [AX_CHECK_CLASS])
+AC_DEFUN([AX_CHECK_CLASS],[
+AC_REQUIRE([AX_PROG_JAVA])
+ac_var_name=`echo $1 | sed 's/\./_/g'`
+dnl Normally I'd use an AC_CACHE_CHECK here but since the variable name is
+dnl dynamic I need an extra level of extraction
+AC_MSG_CHECKING([for $1 class])
+AC_CACHE_VAL(ax_cv_class_$ac_var_name, [
+if test x$ac_cv_prog_uudecode_base64 = xyes; then
+dnl /**
+dnl * Test.java: used to test dynamically if a class exists.
+dnl */
+dnl public class Test
+dnl {
+dnl
+dnl public static void
+dnl main( String[] argv )
+dnl {
+dnl Class lib;
+dnl if (argv.length < 1)
+dnl {
+dnl System.err.println ("Missing argument");
+dnl System.exit (77);
+dnl }
+dnl try
+dnl {
+dnl lib = Class.forName (argv[0]);
+dnl }
+dnl catch (ClassNotFoundException e)
+dnl {
+dnl System.exit (1);
+dnl }
+dnl lib = null;
+dnl System.exit (0);
+dnl }
+dnl
+dnl }
+cat << \EOF > Test.uue
+begin-base64 644 Test.class
+yv66vgADAC0AKQcAAgEABFRlc3QHAAQBABBqYXZhL2xhbmcvT2JqZWN0AQAE
+bWFpbgEAFihbTGphdmEvbGFuZy9TdHJpbmc7KVYBAARDb2RlAQAPTGluZU51
+bWJlclRhYmxlDAAKAAsBAANlcnIBABVMamF2YS9pby9QcmludFN0cmVhbTsJ
+AA0ACQcADgEAEGphdmEvbGFuZy9TeXN0ZW0IABABABBNaXNzaW5nIGFyZ3Vt
+ZW50DAASABMBAAdwcmludGxuAQAVKExqYXZhL2xhbmcvU3RyaW5nOylWCgAV
+ABEHABYBABNqYXZhL2lvL1ByaW50U3RyZWFtDAAYABkBAARleGl0AQAEKEkp
+VgoADQAXDAAcAB0BAAdmb3JOYW1lAQAlKExqYXZhL2xhbmcvU3RyaW5nOylM
+amF2YS9sYW5nL0NsYXNzOwoAHwAbBwAgAQAPamF2YS9sYW5nL0NsYXNzBwAi
+AQAgamF2YS9sYW5nL0NsYXNzTm90Rm91bmRFeGNlcHRpb24BAAY8aW5pdD4B
+AAMoKVYMACMAJAoAAwAlAQAKU291cmNlRmlsZQEACVRlc3QuamF2YQAhAAEA
+AwAAAAAAAgAJAAUABgABAAcAAABtAAMAAwAAACkqvgSiABCyAAwSD7YAFBBN
+uAAaKgMyuAAeTKcACE0EuAAaAUwDuAAasQABABMAGgAdACEAAQAIAAAAKgAK
+AAAACgAAAAsABgANAA4ADgATABAAEwASAB4AFgAiABgAJAAZACgAGgABACMA
+JAABAAcAAAAhAAEAAQAAAAUqtwAmsQAAAAEACAAAAAoAAgAAAAQABAAEAAEA
+JwAAAAIAKA==
+====
+EOF
+ if $UUDECODE Test.uue; then
+ :
+ else
+ echo "configure: __oline__: uudecode had trouble decoding base 64 file 'Test.uue'" >&AS_MESSAGE_LOG_FD
+ echo "configure: failed file was:" >&AS_MESSAGE_LOG_FD
+ cat Test.uue >&AS_MESSAGE_LOG_FD
+ ac_cv_prog_uudecode_base64=no
+ fi
+ rm -f Test.uue
+ if AC_TRY_COMMAND($JAVA $JAVAFLAGS Test $1) >/dev/null 2>&1; then
+ eval "ac_cv_class_$ac_var_name=yes"
+ else
+ eval "ac_cv_class_$ac_var_name=no"
+ fi
+ rm -f Test.class
+else
+ AX_TRY_COMPILE_JAVA([$1], , [eval "ac_cv_class_$ac_var_name=yes"],
+ [eval "ac_cv_class_$ac_var_name=no"])
+fi
+eval "ac_var_val=$`eval echo ac_cv_class_$ac_var_name`"
+eval "HAVE_$ac_var_name=$`echo ac_cv_class_$ac_var_val`"
+HAVE_LAST_CLASS=$ac_var_val
+if test x$ac_var_val = xyes; then
+ ifelse([$2], , :, [$2])
+else
+ ifelse([$3], , :, [$3])
+fi
+])
+dnl for some reason the above statement didn't fall through here?
+dnl do scripts have variable scoping?
+eval "ac_var_val=$`eval echo ac_cv_class_$ac_var_name`"
+AC_MSG_RESULT($ac_var_val)
+])
diff --git a/lib/ffts/m4/ax_check_java_plugin.m4 b/lib/ffts/m4/ax_check_java_plugin.m4
new file mode 100644
index 0000000..34f3dc6
--- /dev/null
+++ b/lib/ffts/m4/ax_check_java_plugin.m4
@@ -0,0 +1,101 @@
+# ===========================================================================
+# http://www.gnu.org/software/autoconf-archive/ax_check_java_plugin.html
+# ===========================================================================
+#
+# SYNOPSIS
+#
+# AX_CHECK_JAVA_PLUGIN(<shell-variable>)
+#
+# DESCRIPTION
+#
+# This macro sets <shell-variable> to empty on failure and to a compatible
+# version of plugin.jar otherwise. Directories searched are /usr/java/*
+# and /usr/local/java/*, which are assumed to be j{dk,re} installations.
+# Apply the shell variable as you see fit. If Sun changes things so that
+# <jre>/lib/plugin.jar is not the magic file, it will stop working.
+#
+# This macro assumes that unzip, zipinfo or pkzipc is available (and can
+# list the contents of the jar archive). The first two are assumed to work
+# similarly enough to the Info-ZIP versions. The pkzipc version is
+# assumed to work if I understand the documentation on PKWARE's site, but
+# YMMV. I do not have access to PKWARE's version to test it.
+#
+# LICENSE
+#
+# Copyright (c) 2008 Duncan Simpson <dps@simpson.demon.co.uk>
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 2 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+# As a special exception, the respective Autoconf Macro's copyright owner
+# gives unlimited permission to copy, distribute and modify the configure
+# scripts that are the output of Autoconf when processing the Macro. You
+# need not follow the terms of the GNU General Public License when using
+# or distributing such scripts, even though portions of the text of the
+# Macro appear in them. The GNU General Public License (GPL) does govern
+# all other use of the material that constitutes the Autoconf Macro.
+#
+# This special exception to the GPL applies to versions of the Autoconf
+# Macro released by the Autoconf Archive. When you make and distribute a
+# modified version of the Autoconf Macro, you may extend this special
+# exception to the GPL to apply to your modified version as well.
+
+#serial 6
+
+AU_ALIAS([DPS_CHECK_PLUGIN], [AX_CHECK_JAVA_PLUGIN])
+AC_DEFUN([AX_CHECK_JAVA_PLUGIN],
+[AC_REQUIRE([AC_PROG_AWK])
+AC_REQUIRE([AC_PROG_FGREP])
+AC_CHECK_PROG(ZIPINFO,[zipinfo unzip pkzipc])
+AC_MSG_CHECKING([for the java plugin])
+case "x$ZIPINFO" in
+[*/zipinfo)]
+ zipinf="zipinfo -1" ;;
+[*/unzip)]
+ zipinf="unzip -l";;
+[*/pkzipc)]
+ zipinf="pkzipc -view";;
+[x*)]
+ AC_MSG_RESULT([skipped, none of zipinfo, unzip and pkzipc found])
+ AC_SUBST($1,[])
+ zipinf="";;
+esac
+if test "x$zipinf" != "x"; then
+jplugin=""
+for jhome in `ls -dr /usr/java/* /usr/local/java/* 2> /dev/null`; do
+for jfile in lib/plugin.jar jre/lib/plugin.jar; do
+if test "x$jplugin" = "x" && test -f "$jhome/$jfile"; then
+eval "$zipinf $jhome/$jfile | $AWK '{ print \$NF; }' | $FGREP netscape/javascript/JSObject" >/dev/null 2>/dev/null
+if test $? -eq 0; then
+dnl Some version of gcj (and javac) refuse to work with some files
+dnl that pass this test. To stop this problem make sure that the compiler
+dnl still works with this jar file in the classpath
+cat << \EOF > Test.java
+/* [#]line __oline__ "configure" */
+public class Test {
+}
+EOF
+if eval "$JAVAC -classpath $jhome/$jfile Test.java 2>/dev/null >/dev/null" && test -f Test.class; then
+jplugin="$jhome/$jfile"
+fi
+rm -f Test.java Test.class
+fi; fi; done; done
+if test "x$jplugin" != "x"; then
+AC_SUBST($1,$jplugin)
+AC_MSG_RESULT($jplugin)
+else
+AC_MSG_RESULT([java plugin not found])
+AC_SUBST($1,[])
+fi
+fi
+])
diff --git a/lib/ffts/m4/ax_java_check_class.m4 b/lib/ffts/m4/ax_java_check_class.m4
new file mode 100644
index 0000000..bd5052a
--- /dev/null
+++ b/lib/ffts/m4/ax_java_check_class.m4
@@ -0,0 +1,85 @@
+# ===========================================================================
+# http://www.gnu.org/software/autoconf-archive/ax_java_check_class.html
+# ===========================================================================
+#
+# SYNOPSIS
+#
+# AX_JAVA_CHECK_CLASS(<class>,<action-if-found>,<action-if-not-found>)
+#
+# DESCRIPTION
+#
+# Test if a Java class is available. Based on AX_PROG_JAVAC_WORKS. This
+# version uses a cache variable which is compiler-, option- and
+# classpath-dependent (so if you switch from javac to gcj it correctly
+# notices and redoes the test).
+#
+# The macro tries to compile a minimal program importing <class>. Some
+# newer compilers moan about the unused import but still fail or produce
+# a class file anyway. All moaning is sunk to /dev/null since we only
+# want to know whether the class can be imported. This is a recommended
+# followup to AX_CHECK_JAVA_PLUGIN with the classpath appropriately adjusted.
+#
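+# For illustration, a configure.ac fragment might look like the
+# following (the class name and messages are examples only, not
+# something the macro mandates):
+#
+#   AX_JAVA_CHECK_CLASS([netscape.javascript.JSObject],
+#     [AC_MSG_NOTICE([JSObject is importable])],
+#     [AC_MSG_ERROR([JSObject is required but could not be imported])])
+#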
+# LICENSE
+#
+# Copyright (c) 2008 Duncan Simpson <dps@simpson.demon.co.uk>
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 2 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+# As a special exception, the respective Autoconf Macro's copyright owner
+# gives unlimited permission to copy, distribute and modify the configure
+# scripts that are the output of Autoconf when processing the Macro. You
+# need not follow the terms of the GNU General Public License when using
+# or distributing such scripts, even though portions of the text of the
+# Macro appear in them. The GNU General Public License (GPL) does govern
+# all other use of the material that constitutes the Autoconf Macro.
+#
+# This special exception to the GPL applies to versions of the Autoconf
+# Macro released by the Autoconf Archive. When you make and distribute a
+# modified version of the Autoconf Macro, you may extend this special
+# exception to the GPL to apply to your modified version as well.
+
+#serial 8
+
+AU_ALIAS([DPS_JAVA_CHECK_CLASS], [AX_JAVA_CHECK_CLASS])
+AC_DEFUN([AX_JAVA_CHECK_CLASS],[
+m4_define([cache_val],[m4_translit(ax_cv_have_java_class_$1, " ." ,"__")])
+if test "x$CLASSPATH" != "x"; then
+xtra=" with classpath ${CLASSPATH}"
+xopts=`echo ${CLASSPATH} | ${SED} 's/^ *://'`
+xopts="-classpath $xopts"
+else xtra=""; xopts=""; fi
+cache_var="cache_val"AS_TR_SH([_Jc_${JAVAC}_Cp_${CLASSPATH}])
+AC_CACHE_CHECK([if the $1 class is available$xtra], [$cache_var], [
+JAVA_TEST=Test.java
+CLASS_TEST=Test.class
+cat << \EOF > $JAVA_TEST
+/* [#]line __oline__ "configure" */
+import $1;
+public class Test {
+}
+EOF
+if AC_TRY_COMMAND($JAVAC $JAVACFLAGS $xopts $JAVA_TEST) >/dev/null 2>&1; then
+ eval "${cache_var}=yes"
+else
+ eval "${cache_var}=no"
+ echo "configure: failed program was:" >&AS_MESSAGE_LOG_FD
+ cat $JAVA_TEST >&AS_MESSAGE_LOG_FD
+fi
+rm -f $JAVA_TEST $CLASS_TEST
+])
+if eval 'test "x$'${cache_var}'" = "xyes"'; then
+$2
+true; else
+$3
+false; fi])
diff --git a/lib/ffts/m4/ax_prog_java.m4 b/lib/ffts/m4/ax_prog_java.m4
new file mode 100644
index 0000000..5471f32
--- /dev/null
+++ b/lib/ffts/m4/ax_prog_java.m4
@@ -0,0 +1,115 @@
+# ===========================================================================
+# http://www.gnu.org/software/autoconf-archive/ax_prog_java.html
+# ===========================================================================
+#
+# SYNOPSIS
+#
+# AX_PROG_JAVA
+#
+# DESCRIPTION
+#
+# Here is a summary of the main macros:
+#
+# AX_PROG_JAVAC: finds a Java compiler.
+#
+# AX_PROG_JAVA: finds a Java virtual machine.
+#
+# AX_CHECK_CLASS: finds if we have the given class (beware of CLASSPATH!).
+#
+# AX_CHECK_RQRD_CLASS: finds if we have the given class and aborts
+# configure otherwise.
+#
+# AX_TRY_COMPILE_JAVA: attempts to compile user-given source.
+#
+# AX_TRY_RUN_JAVA: attempts to compile and run user-given source.
+#
+# AX_JAVA_OPTIONS: adds Java configure options.
+#
+# AX_PROG_JAVA tests for an existing Java virtual machine. It uses the
+# environment variable JAVA, then tests in sequence various common Java
+# virtual machines. For political reasons, it starts with the free ones.
+# You *must* call [AX_PROG_JAVAC] before it.
+#
+# If you want to force a specific VM:
+#
+# - at the configure.in level, set JAVA=yourvm before calling
+#   AX_PROG_JAVA (but after AC_INIT)
+#
+# - at the configure level, setenv JAVA
+#
+# You can use the JAVA variable in your Makefile.in, with @JAVA@.
+#
+# *Warning*: its success or failure can depend on a proper setting of the
+# CLASSPATH env. variable.
+#
+# TODO: allow excluding particular virtual machines (rationale: most Java
+# programs cannot run on some VMs, such as kaffe).
+#
+# Note: This is part of the set of autoconf M4 macros for Java programs.
+# It is VERY IMPORTANT that you download the whole set, as some macros
+# depend on others. Unfortunately, the autoconf archive does not support
+# the concept of a set of macros, so I had to break it up for submission.
+#
+# A Web page, with a link to the latest CVS snapshot is at
+# <http://www.internatif.org/bortzmeyer/autoconf-Java/>.
+#
+# This is a sample configure.in; process this file with autoconf to
+# produce a configure script.
+#
+# AC_INIT(UnTag.java)
+#
+# dnl Checks for programs.
+# AC_CHECK_CLASSPATH
+# AX_PROG_JAVAC
+# AX_PROG_JAVA
+#
+# dnl Checks for classes
+# AX_CHECK_RQRD_CLASS(org.xml.sax.Parser)
+# AX_CHECK_RQRD_CLASS(com.jclark.xml.sax.Driver)
+#
+# AC_OUTPUT(Makefile)
+#
+# LICENSE
+#
+# Copyright (c) 2008 Stephane Bortzmeyer <bortzmeyer@pasteur.fr>
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 2 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+# As a special exception, the respective Autoconf Macro's copyright owner
+# gives unlimited permission to copy, distribute and modify the configure
+# scripts that are the output of Autoconf when processing the Macro. You
+# need not follow the terms of the GNU General Public License when using
+# or distributing such scripts, even though portions of the text of the
+# Macro appear in them. The GNU General Public License (GPL) does govern
+# all other use of the material that constitutes the Autoconf Macro.
+#
+# This special exception to the GPL applies to versions of the Autoconf
+# Macro released by the Autoconf Archive. When you make and distribute a
+# modified version of the Autoconf Macro, you may extend this special
+# exception to the GPL to apply to your modified version as well.
+
+#serial 8
+
+AU_ALIAS([AC_PROG_JAVA], [AX_PROG_JAVA])
+AC_DEFUN([AX_PROG_JAVA],[
+if test x$JAVAPREFIX = x; then
+ test x$JAVA = x && AC_CHECK_PROGS(JAVA, kaffe java)
+else
+ test x$JAVA = x && AC_CHECK_PROGS(JAVA, kaffe java, $JAVAPREFIX)
+fi
+test x$JAVA = x && AC_MSG_ERROR([no acceptable Java virtual machine found in \$PATH])
+AX_PROG_JAVA_WORKS
+AC_PROVIDE([$0])dnl
+])
diff --git a/lib/ffts/m4/ax_prog_java_cc.m4 b/lib/ffts/m4/ax_prog_java_cc.m4
new file mode 100644
index 0000000..3df064f
--- /dev/null
+++ b/lib/ffts/m4/ax_prog_java_cc.m4
@@ -0,0 +1,104 @@
+# ===========================================================================
+# http://www.gnu.org/software/autoconf-archive/ax_prog_java_cc.html
+# ===========================================================================
+#
+# SYNOPSIS
+#
+# AX_PROG_JAVA_CC
+#
+# DESCRIPTION
+#
+# Finds the appropriate java compiler on your path. By preference the java
+# compiler is gcj, then javac, then jikes (the order checked below).
+#
+# The macro can take one argument specifying a space separated list of
+# java compiler names.
+#
+# For example:
+#
+# AX_PROG_JAVA_CC(javac, gcj)
+#
+# The macro also sets the compiler options variable: JAVA_CC_OPTS to
+# something sensible:
+#
+# - for GCJ it sets it to: @GCJ_OPTS@
+# (if GCJ_OPTS is not yet defined then it is set to "-C")
+#
+# - no other compiler has applicable options yet
+#
+# Here's an example configure.in:
+#
+# AC_INIT(Makefile.in)
+# AX_PROG_JAVA_CC()
+# AC_OUTPUT(Makefile)
+# dnl End.
+#
+# And here's the start of the Makefile.in:
+#
+# PROJECT_ROOT := @srcdir@
+# # Tool definitions.
+# JAVAC := @JAVA_CC@
+# JAVAC_OPTS := @JAVA_CC_OPTS@
+# JAR_TOOL := @jar_tool@
+#
+# LICENSE
+#
+# Copyright (c) 2008 Nic Ferrier <nferrier@tapsellferrier.co.uk>
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 2 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+# As a special exception, the respective Autoconf Macro's copyright owner
+# gives unlimited permission to copy, distribute and modify the configure
+# scripts that are the output of Autoconf when processing the Macro. You
+# need not follow the terms of the GNU General Public License when using
+# or distributing such scripts, even though portions of the text of the
+# Macro appear in them. The GNU General Public License (GPL) does govern
+# all other use of the material that constitutes the Autoconf Macro.
+#
+# This special exception to the GPL applies to versions of the Autoconf
+# Macro released by the Autoconf Archive. When you make and distribute a
+# modified version of the Autoconf Macro, you may extend this special
+# exception to the GPL to apply to your modified version as well.
+
+#serial 4
+
+# AX_PROG_JAVA_CC([COMPILER ...])
+# --------------------------
+# COMPILER ... is a space separated list of java compilers to search for.
+# This just gives the user an opportunity to specify an alternative
+# search list for the java compiler.
+AU_ALIAS([AC_PROG_JAVA_CC], [AX_PROG_JAVA_CC])
+AC_DEFUN([AX_PROG_JAVA_CC],
+[AC_ARG_VAR([JAVA_CC], [java compiler command])dnl
+AC_ARG_VAR([JAVA_CC_FLAGS], [java compiler flags])dnl
+m4_ifval([$1],
+ [AC_CHECK_TOOLS(JAVA_CC, [$1])],
+[AC_CHECK_TOOL(JAVA_CC, gcj)
+if test -z "$JAVA_CC"; then
+ AC_CHECK_TOOL(JAVA_CC, javac)
+fi
+if test -z "$JAVA_CC"; then
+ AC_CHECK_TOOL(JAVA_CC, jikes)
+fi
+])
+
+if test "$JAVA_CC" = "gcj"; then
+   if test "$GCJ_OPTS" = ""; then
+      AC_SUBST(GCJ_OPTS,-C)
+   fi
+   dnl AC_SUBST takes at most a variable and a value, so the description
+   dnl of the GCJ compilation options is kept here as a comment.
+   AC_SUBST(JAVA_CC_OPTS, [@GCJ_OPTS@])
+fi
+test -z "$JAVA_CC" && AC_MSG_ERROR([no acceptable java compiler found in \$PATH])
+])# AX_PROG_JAVA_CC
diff --git a/lib/ffts/m4/ax_prog_java_works.m4 b/lib/ffts/m4/ax_prog_java_works.m4
new file mode 100644
index 0000000..741bd56
--- /dev/null
+++ b/lib/ffts/m4/ax_prog_java_works.m4
@@ -0,0 +1,134 @@
+# ===========================================================================
+# http://www.gnu.org/software/autoconf-archive/ax_prog_java_works.html
+# ===========================================================================
+#
+# SYNOPSIS
+#
+# AX_PROG_JAVA_WORKS
+#
+# DESCRIPTION
+#
+# Internal use ONLY.
+#
+# Note: This is part of the set of autoconf M4 macros for Java programs.
+# It is VERY IMPORTANT that you download the whole set, as some macros
+# depend on others. Unfortunately, the autoconf archive does not support
+# the concept of a set of macros, so I had to break it up for submission.
+# The general documentation, as well as the sample configure.in, is
+# included in the AX_PROG_JAVA macro.
+#
+# LICENSE
+#
+# Copyright (c) 2008 Stephane Bortzmeyer <bortzmeyer@pasteur.fr>
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 2 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+# As a special exception, the respective Autoconf Macro's copyright owner
+# gives unlimited permission to copy, distribute and modify the configure
+# scripts that are the output of Autoconf when processing the Macro. You
+# need not follow the terms of the GNU General Public License when using
+# or distributing such scripts, even though portions of the text of the
+# Macro appear in them. The GNU General Public License (GPL) does govern
+# all other use of the material that constitutes the Autoconf Macro.
+#
+# This special exception to the GPL applies to versions of the Autoconf
+# Macro released by the Autoconf Archive. When you make and distribute a
+# modified version of the Autoconf Macro, you may extend this special
+# exception to the GPL to apply to your modified version as well.
+
+#serial 8
+
+AU_ALIAS([AC_PROG_JAVA_WORKS], [AX_PROG_JAVA_WORKS])
+AC_DEFUN([AX_PROG_JAVA_WORKS], [
+AC_PATH_PROG(UUDECODE, uudecode, [no])
+if test x$UUDECODE != xno; then
+AC_CACHE_CHECK([if uudecode can decode base 64 file], ac_cv_prog_uudecode_base64, [
+dnl /**
+dnl * Test.java: used to test if java compiler works.
+dnl */
+dnl public class Test
+dnl {
+dnl
+dnl public static void
+dnl main( String[] argv )
+dnl {
+dnl System.exit (0);
+dnl }
+dnl
+dnl }
+cat << \EOF > Test.uue
+begin-base64 644 Test.class
+yv66vgADAC0AFQcAAgEABFRlc3QHAAQBABBqYXZhL2xhbmcvT2JqZWN0AQAE
+bWFpbgEAFihbTGphdmEvbGFuZy9TdHJpbmc7KVYBAARDb2RlAQAPTGluZU51
+bWJlclRhYmxlDAAKAAsBAARleGl0AQAEKEkpVgoADQAJBwAOAQAQamF2YS9s
+YW5nL1N5c3RlbQEABjxpbml0PgEAAygpVgwADwAQCgADABEBAApTb3VyY2VG
+aWxlAQAJVGVzdC5qYXZhACEAAQADAAAAAAACAAkABQAGAAEABwAAACEAAQAB
+AAAABQO4AAyxAAAAAQAIAAAACgACAAAACgAEAAsAAQAPABAAAQAHAAAAIQAB
+AAEAAAAFKrcAErEAAAABAAgAAAAKAAIAAAAEAAQABAABABMAAAACABQ=
+====
+EOF
+if $UUDECODE Test.uue; then
+ ac_cv_prog_uudecode_base64=yes
+else
+ echo "configure: __oline__: uudecode had trouble decoding base 64 file 'Test.uue'" >&AS_MESSAGE_LOG_FD
+ echo "configure: failed file was:" >&AS_MESSAGE_LOG_FD
+ cat Test.uue >&AS_MESSAGE_LOG_FD
+ ac_cv_prog_uudecode_base64=no
+fi
+rm -f Test.uue])
+fi
+if test x$ac_cv_prog_uudecode_base64 != xyes; then
+ rm -f Test.class
+ AC_MSG_WARN([I have to compile Test.class from scratch])
+ if test x$ac_cv_prog_javac_works = xno; then
+ AC_MSG_ERROR([Cannot compile java source. $JAVAC does not work properly])
+ fi
+ if test x$ac_cv_prog_javac_works = x; then
+ AX_PROG_JAVAC
+ fi
+fi
+AC_CACHE_CHECK(if $JAVA works, ac_cv_prog_java_works, [
+JAVA_TEST=Test.java
+CLASS_TEST=Test.class
+TEST=Test
+changequote(, )dnl
+cat << \EOF > $JAVA_TEST
+/* [#]line __oline__ "configure" */
+public class Test {
+public static void main (String args[]) {
+ System.exit (0);
+} }
+EOF
+changequote([, ])dnl
+if test x$ac_cv_prog_uudecode_base64 != xyes; then
+ if AC_TRY_COMMAND($JAVAC $JAVACFLAGS $JAVA_TEST) && test -s $CLASS_TEST; then
+ :
+ else
+ echo "configure: failed program was:" >&AS_MESSAGE_LOG_FD
+ cat $JAVA_TEST >&AS_MESSAGE_LOG_FD
+ AC_MSG_ERROR([The Java compiler $JAVAC failed (see config.log, check the CLASSPATH?)])
+ fi
+fi
+if AC_TRY_COMMAND($JAVA $JAVAFLAGS $TEST) >/dev/null 2>&1; then
+ ac_cv_prog_java_works=yes
+else
+ echo "configure: failed program was:" >&AS_MESSAGE_LOG_FD
+ cat $JAVA_TEST >&AS_MESSAGE_LOG_FD
+ AC_MSG_ERROR([The Java VM $JAVA failed (see config.log, check the CLASSPATH?)])
+fi
+rm -fr $JAVA_TEST $CLASS_TEST Test.uue
+])
+AC_PROVIDE([$0])dnl
+])
diff --git a/lib/ffts/m4/ax_prog_javadoc.m4 b/lib/ffts/m4/ax_prog_javadoc.m4
new file mode 100644
index 0000000..508375d
--- /dev/null
+++ b/lib/ffts/m4/ax_prog_javadoc.m4
@@ -0,0 +1,52 @@
+# ===========================================================================
+# http://www.gnu.org/software/autoconf-archive/ax_prog_javadoc.html
+# ===========================================================================
+#
+# SYNOPSIS
+#
+# AX_PROG_JAVADOC
+#
+# DESCRIPTION
+#
+# AX_PROG_JAVADOC tests for an existing javadoc generator. It uses the
+# environment variable JAVADOC, then tests in sequence various common
+# javadoc generators.
+#
+# If you want to force a specific generator:
+#
+# - at the configure.in level, set JAVADOC=yourgenerator before calling
+# AX_PROG_JAVADOC
+#
+# - at the configure level, setenv JAVADOC
+#
+# You can use the JAVADOC variable in your Makefile.in, with @JAVADOC@.
+#
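+# For instance, a Makefile.in rule could invoke it as below (the target
+# name and the JAVA_SRCS variable are illustrative, not set by this
+# macro):
+#
+#   apidocs:
+#           @JAVADOC@ -d apidocs $(JAVA_SRCS)
+#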
+# Note: This macro depends on the autoconf M4 macros for Java programs. It
+# is VERY IMPORTANT that you download that whole set, as some macros
+# depend on others. Unfortunately, the autoconf archive does not support
+# the concept of a set of macros, so I had to break it up for submission.
+#
+# The general documentation of those macros, as well as the sample
+# configure.in, is included in the AX_PROG_JAVA macro.
+#
+# LICENSE
+#
+# Copyright (c) 2008 Egon Willighagen <e.willighagen@science.ru.nl>
+#
+# Copying and distribution of this file, with or without modification, are
+# permitted in any medium without royalty provided the copyright notice
+# and this notice are preserved. This file is offered as-is, without any
+# warranty.
+
+#serial 7
+
+AU_ALIAS([AC_PROG_JAVADOC], [AX_PROG_JAVADOC])
+AC_DEFUN([AX_PROG_JAVADOC],[
+if test "x$JAVAPREFIX" = x; then
+ test "x$JAVADOC" = x && AC_CHECK_PROGS(JAVADOC, javadoc)
+else
+ test "x$JAVADOC" = x && AC_CHECK_PROGS(JAVADOC, javadoc, $JAVAPREFIX)
+fi
+test "x$JAVADOC" = x && AC_MSG_ERROR([no acceptable javadoc generator found in \$PATH])
+AC_PROVIDE([$0])dnl
+])
diff --git a/lib/ffts/m4/ax_prog_javah.m4 b/lib/ffts/m4/ax_prog_javah.m4
new file mode 100644
index 0000000..7afe410
--- /dev/null
+++ b/lib/ffts/m4/ax_prog_javah.m4
@@ -0,0 +1,43 @@
+# ===========================================================================
+# http://www.gnu.org/software/autoconf-archive/ax_prog_javah.html
+# ===========================================================================
+#
+# SYNOPSIS
+#
+# AX_PROG_JAVAH
+#
+# DESCRIPTION
+#
+# AX_PROG_JAVAH tests the availability of the javah header generator and
+# looks for the jni.h header file. If available, JAVAH is set to the full
+# path of javah and CPPFLAGS is updated accordingly.
+#
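+# A minimal usage sketch: call the macro from configure.ac, then compile
+# JNI sources with the updated CPPFLAGS so that <jni.h> is found (the
+# object and source names below are illustrative):
+#
+#   AX_PROG_JAVAH
+#
+# and later, in a Makefile.in:
+#
+#   ffts_jni.o: ffts_jni.c
+#           $(CC) $(CPPFLAGS) $(CFLAGS) -c ffts_jni.c
+#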
+# LICENSE
+#
+# Copyright (c) 2008 Luc Maisonobe <luc@spaceroots.org>
+#
+# Copying and distribution of this file, with or without modification, are
+# permitted in any medium without royalty provided the copyright notice
+# and this notice are preserved. This file is offered as-is, without any
+# warranty.
+
+#serial 5
+
+AU_ALIAS([AC_PROG_JAVAH], [AX_PROG_JAVAH])
+AC_DEFUN([AX_PROG_JAVAH],[
+AC_REQUIRE([AC_CANONICAL_SYSTEM])dnl
+AC_REQUIRE([AC_PROG_CPP])dnl
+AC_PATH_PROG(JAVAH,javah)
+if test x"`eval 'echo $ac_cv_path_JAVAH'`" != x ; then
+ AC_TRY_CPP([#include <jni.h>],,[
+ ac_save_CPPFLAGS="$CPPFLAGS"
+changequote(, )dnl
+ ac_dir=`echo $ac_cv_path_JAVAH | sed 's,\(.*\)/[^/]*/[^/]*$,\1/include,'`
+ ac_machdep=`echo $build_os | sed 's,[-0-9].*,,' | sed 's,cygwin,win32,'`
+changequote([, ])dnl
+ CPPFLAGS="$ac_save_CPPFLAGS -I$ac_dir -I$ac_dir/$ac_machdep"
+ AC_TRY_CPP([#include <jni.h>],
+ ac_save_CPPFLAGS="$CPPFLAGS",
+ AC_MSG_WARN([unable to include <jni.h>]))
+ CPPFLAGS="$ac_save_CPPFLAGS"])
+fi])
diff --git a/lib/ffts/m4/ax_try_compile_java.m4 b/lib/ffts/m4/ax_try_compile_java.m4
new file mode 100644
index 0000000..8efd091
--- /dev/null
+++ b/lib/ffts/m4/ax_try_compile_java.m4
@@ -0,0 +1,55 @@
+# ===========================================================================
+# http://www.gnu.org/software/autoconf-archive/ax_try_compile_java.html
+# ===========================================================================
+#
+# SYNOPSIS
+#
+# AX_TRY_COMPILE_JAVA
+#
+# DESCRIPTION
+#
+# AX_TRY_COMPILE_JAVA attempts to compile user-given source.
+#
+# *Warning*: its success or failure can depend on a proper setting of the
+# CLASSPATH env. variable.
+#
+# Note: This is part of the set of autoconf M4 macros for Java programs.
+# It is VERY IMPORTANT that you download the whole set, as some macros
+# depend on others. Unfortunately, the autoconf archive does not support
+# the concept of a set of macros, so I had to break it up for submission.
+# The general documentation, as well as the sample configure.in, is
+# included in the AX_PROG_JAVA macro.
+#
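+# Judging from the macro body below, the arguments are: an optional
+# import, the class body, action-if-compiled and action-if-failed. A
+# hypothetical call could read:
+#
+#   AX_TRY_COMPILE_JAVA([java.util.List],
+#     [List l;],
+#     [AC_MSG_NOTICE([javac can compile a trivial class])],
+#     [AC_MSG_ERROR([javac failed on a trivial class])])
+#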
+# LICENSE
+#
+# Copyright (c) 2008 Devin Weaver <ktohg@tritarget.com>
+#
+# Copying and distribution of this file, with or without modification, are
+# permitted in any medium without royalty provided the copyright notice
+# and this notice are preserved. This file is offered as-is, without any
+# warranty.
+
+#serial 7
+
+AU_ALIAS([AC_TRY_COMPILE_JAVA], [AX_TRY_COMPILE_JAVA])
+AC_DEFUN([AX_TRY_COMPILE_JAVA],[
+AC_REQUIRE([AX_PROG_JAVAC])dnl
+cat << \EOF > Test.java
+/* [#]line __oline__ "configure" */
+ifelse([$1], , , [import $1;])
+public class Test {
+[$2]
+}
+EOF
+if AC_TRY_COMMAND($JAVAC $JAVACFLAGS Test.java) && test -s Test.class
+then
+dnl Don't remove the temporary files here, so they can be examined.
+ ifelse([$3], , :, [$3])
+else
+ echo "configure: failed program was:" >&AS_MESSAGE_LOG_FD
+ cat Test.java >&AS_MESSAGE_LOG_FD
+ifelse([$4], , , [ rm -fr Test*
+ $4
+])dnl
+fi
+rm -fr Test*])
diff --git a/lib/ffts/m4/ax_try_run_java.m4 b/lib/ffts/m4/ax_try_run_java.m4
new file mode 100644
index 0000000..80ec934
--- /dev/null
+++ b/lib/ffts/m4/ax_try_run_java.m4
@@ -0,0 +1,56 @@
+# ===========================================================================
+# http://www.gnu.org/software/autoconf-archive/ax_try_run_java.html
+# ===========================================================================
+#
+# SYNOPSIS
+#
+# AX_TRY_RUN_JAVA
+#
+# DESCRIPTION
+#
+# AX_TRY_RUN_JAVA attempts to compile and run user-given source.
+#
+# *Warning*: its success or failure can depend on a proper setting of the
+# CLASSPATH env. variable.
+#
+# Note: This is part of the set of autoconf M4 macros for Java programs.
+# It is VERY IMPORTANT that you download the whole set, as some macros
+# depend on others. Unfortunately, the autoconf archive does not support
+# the concept of a set of macros, so I had to break it up for submission.
+# The general documentation, as well as the sample configure.in, is
+# included in the AX_PROG_JAVA macro.
+#
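+# Judging from the macro body below, the arguments are: an optional
+# import, the class body (which must define a main() that exits 0 on
+# success), action-if-ok and action-if-failed. A hypothetical call:
+#
+#   AX_TRY_RUN_JAVA(,
+#     [public static void main(String[] args) { System.exit(0); }],
+#     [AC_MSG_NOTICE([the JVM runs freshly compiled classes])],
+#     [AC_MSG_ERROR([the JVM could not run a trivial class])])
+#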
+# LICENSE
+#
+# Copyright (c) 2008 Devin Weaver <ktohg@tritarget.com>
+#
+# Copying and distribution of this file, with or without modification, are
+# permitted in any medium without royalty provided the copyright notice
+# and this notice are preserved. This file is offered as-is, without any
+# warranty.
+
+#serial 1
+
+AU_ALIAS([AC_TRY_RUN_JAVA], [AX_TRY_RUN_JAVA])
+AC_DEFUN([AX_TRY_RUN_JAVA],[
+AC_REQUIRE([AX_PROG_JAVAC])dnl
+AC_REQUIRE([AX_PROG_JAVA])dnl
+cat << \EOF > Test.java
+/* [#]line __oline__ "configure" */
+ifelse([$1], , , [import $1;])
+public class Test {
+[$2]
+}
+EOF
+if AC_TRY_COMMAND($JAVAC $JAVACFLAGS Test.java) && test -s Test.class && ($JAVA $JAVAFLAGS Test; exit) 2>/dev/null
+then
+dnl Don't remove the temporary files here, so they can be examined.
+ ifelse([$3], , :, [$3])
+else
+ echo "configure: failed program was:" >&AS_MESSAGE_LOG_FD
+ cat Test.java >&AS_MESSAGE_LOG_FD
+ifelse([$4], , , [ rm -fr Test*
+ $4
+])dnl
+fi
+rm -fr Test*])
diff --git a/lib/ffts/src/Makefile.am b/lib/ffts/src/Makefile.am
index 8547795..aa4c3c3 100644
--- a/lib/ffts/src/Makefile.am
+++ b/lib/ffts/src/Makefile.am
@@ -20,10 +20,10 @@ libffts_la_SOURCES += vfp.s
else
if HAVE_NEON
+libffts_la_SOURCES += neon.s
+
if DYNAMIC_DISABLED
libffts_la_SOURCES += neon_static_f.s neon_static_i.s
-else
-libffts_la_SOURCES += neon.s
endif
else
diff --git a/lib/ffts/src/Makefile.in b/lib/ffts/src/Makefile.in
index a1eefbc..2002ad8 100644
--- a/lib/ffts/src/Makefile.in
+++ b/lib/ffts/src/Makefile.in
@@ -1,7 +1,7 @@
-# Makefile.in generated by automake 1.12.4 from Makefile.am.
+# Makefile.in generated by automake 1.14 from Makefile.am.
# @configure_input@
-# Copyright (C) 1994-2012 Free Software Foundation, Inc.
+# Copyright (C) 1994-2013 Free Software Foundation, Inc.
# This Makefile.in is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
@@ -16,23 +16,51 @@
VPATH = @srcdir@
-am__make_dryrun = \
- { \
- am__dry=no; \
+am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)'
+am__make_running_with_option = \
+ case $${target_option-} in \
+ ?) ;; \
+ *) echo "am__make_running_with_option: internal error: invalid" \
+ "target option '$${target_option-}' specified" >&2; \
+ exit 1;; \
+ esac; \
+ has_opt=no; \
+ sane_makeflags=$$MAKEFLAGS; \
+ if $(am__is_gnu_make); then \
+ sane_makeflags=$$MFLAGS; \
+ else \
case $$MAKEFLAGS in \
*\\[\ \ ]*) \
- echo 'am--echo: ; @echo "AM" OK' | $(MAKE) -f - 2>/dev/null \
- | grep '^AM OK$$' >/dev/null || am__dry=yes;; \
- *) \
- for am__flg in $$MAKEFLAGS; do \
- case $$am__flg in \
- *=*|--*) ;; \
- *n*) am__dry=yes; break;; \
- esac; \
- done;; \
+ bs=\\; \
+ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
+ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
esac; \
- test $$am__dry = yes; \
- }
+ fi; \
+ skip_next=no; \
+ strip_trailopt () \
+ { \
+ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
+ }; \
+ for flg in $$sane_makeflags; do \
+ test $$skip_next = yes && { skip_next=no; continue; }; \
+ case $$flg in \
+ *=*|--*) continue;; \
+ -*I) strip_trailopt 'I'; skip_next=yes;; \
+ -*I?*) strip_trailopt 'I';; \
+ -*O) strip_trailopt 'O'; skip_next=yes;; \
+ -*O?*) strip_trailopt 'O';; \
+ -*l) strip_trailopt 'l'; skip_next=yes;; \
+ -*l?*) strip_trailopt 'l';; \
+ -[dEDm]) skip_next=yes;; \
+ -[JT]) skip_next=yes;; \
+ esac; \
+ case $$flg in \
+ *$$target_option*) has_opt=yes; break;; \
+ esac; \
+ done; \
+ test $$has_opt = yes
+am__make_dryrun = (target_option=n; $(am__make_running_with_option))
+am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
pkgdatadir = $(datadir)/@PACKAGE@
pkgincludedir = $(includedir)/@PACKAGE@
pkglibdir = $(libdir)/@PACKAGE@
@@ -54,12 +82,12 @@ host_triplet = @host@
@DYNAMIC_DISABLED_TRUE@am__append_1 = ffts_static.c
@DYNAMIC_DISABLED_FALSE@am__append_2 = codegen.c
@HAVE_VFP_TRUE@am__append_3 = vfp.s
-@DYNAMIC_DISABLED_TRUE@@HAVE_NEON_TRUE@@HAVE_VFP_FALSE@am__append_4 = neon_static_f.s neon_static_i.s
-@DYNAMIC_DISABLED_FALSE@@HAVE_NEON_TRUE@@HAVE_VFP_FALSE@am__append_5 = neon.s
+@HAVE_NEON_TRUE@@HAVE_VFP_FALSE@am__append_4 = neon.s
+@DYNAMIC_DISABLED_TRUE@@HAVE_NEON_TRUE@@HAVE_VFP_FALSE@am__append_5 = neon_static_f.s neon_static_i.s
@HAVE_NEON_FALSE@@HAVE_SSE_TRUE@@HAVE_VFP_FALSE@am__append_6 = sse.s
subdir = src
-DIST_COMMON = $(libffts_include_HEADERS) $(srcdir)/Makefile.am \
- $(srcdir)/Makefile.in $(top_srcdir)/depcomp
+DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \
+ $(top_srcdir)/depcomp $(libffts_include_HEADERS)
ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
am__aclocal_m4_deps = $(top_srcdir)/m4/ax_check_classpath.m4 \
$(top_srcdir)/m4/ax_check_java_home.m4 \
@@ -111,14 +139,14 @@ am__libffts_la_SOURCES_DIST = ffts.c ffts_small.c ffts_nd.c \
codegen_sse.h ffts.h ffts_nd.h ffts_real.h ffts_real_nd.h \
ffts_small.h ffts_static.h macros-alpha.h macros-altivec.h \
macros-neon.h macros-sse.h macros.h neon.h neon_float.h \
- patterns.h types.h vfp.h ffts_static.c codegen.c vfp.s \
- neon_static_f.s neon_static_i.s neon.s sse.s
+ patterns.h types.h vfp.h ffts_static.c codegen.c vfp.s neon.s \
+ neon_static_f.s neon_static_i.s sse.s
@DYNAMIC_DISABLED_TRUE@am__objects_1 = ffts_static.lo
@DYNAMIC_DISABLED_FALSE@am__objects_2 = codegen.lo
@HAVE_VFP_TRUE@am__objects_3 = vfp.lo
-@DYNAMIC_DISABLED_TRUE@@HAVE_NEON_TRUE@@HAVE_VFP_FALSE@am__objects_4 = neon_static_f.lo \
+@HAVE_NEON_TRUE@@HAVE_VFP_FALSE@am__objects_4 = neon.lo
+@DYNAMIC_DISABLED_TRUE@@HAVE_NEON_TRUE@@HAVE_VFP_FALSE@am__objects_5 = neon_static_f.lo \
@DYNAMIC_DISABLED_TRUE@@HAVE_NEON_TRUE@@HAVE_VFP_FALSE@ neon_static_i.lo
-@DYNAMIC_DISABLED_FALSE@@HAVE_NEON_TRUE@@HAVE_VFP_FALSE@am__objects_5 = neon.lo
@HAVE_NEON_FALSE@@HAVE_SSE_TRUE@@HAVE_VFP_FALSE@am__objects_6 = \
@HAVE_NEON_FALSE@@HAVE_SSE_TRUE@@HAVE_VFP_FALSE@ sse.lo
am_libffts_la_OBJECTS = ffts.lo ffts_small.lo ffts_nd.lo ffts_real.lo \
@@ -126,22 +154,52 @@ am_libffts_la_OBJECTS = ffts.lo ffts_small.lo ffts_nd.lo ffts_real.lo \
$(am__objects_3) $(am__objects_4) $(am__objects_5) \
$(am__objects_6)
libffts_la_OBJECTS = $(am_libffts_la_OBJECTS)
+AM_V_lt = $(am__v_lt_@AM_V@)
+am__v_lt_ = $(am__v_lt_@AM_DEFAULT_V@)
+am__v_lt_0 = --silent
+am__v_lt_1 =
+AM_V_P = $(am__v_P_@AM_V@)
+am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
+am__v_P_0 = false
+am__v_P_1 = :
+AM_V_GEN = $(am__v_GEN_@AM_V@)
+am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
+am__v_GEN_0 = @echo " GEN " $@;
+am__v_GEN_1 =
+AM_V_at = $(am__v_at_@AM_V@)
+am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
+am__v_at_0 = @
+am__v_at_1 =
DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir)
depcomp = $(SHELL) $(top_srcdir)/depcomp
am__depfiles_maybe = depfiles
am__mv = mv -f
COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \
$(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS)
-LTCOMPILE = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \
- --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \
- $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS)
+LTCOMPILE = $(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) \
+ $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) \
+ $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) \
+ $(AM_CFLAGS) $(CFLAGS)
+AM_V_CC = $(am__v_CC_@AM_V@)
+am__v_CC_ = $(am__v_CC_@AM_DEFAULT_V@)
+am__v_CC_0 = @echo " CC " $@;
+am__v_CC_1 =
CCLD = $(CC)
-LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \
- --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) \
- $(LDFLAGS) -o $@
+LINK = $(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) \
+ $(LIBTOOLFLAGS) --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) \
+ $(AM_LDFLAGS) $(LDFLAGS) -o $@
+AM_V_CCLD = $(am__v_CCLD_@AM_V@)
+am__v_CCLD_ = $(am__v_CCLD_@AM_DEFAULT_V@)
+am__v_CCLD_0 = @echo " CCLD " $@;
+am__v_CCLD_1 =
CCASCOMPILE = $(CCAS) $(AM_CCASFLAGS) $(CCASFLAGS)
-LTCCASCOMPILE = $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \
- --mode=compile $(CCAS) $(AM_CCASFLAGS) $(CCASFLAGS)
+LTCCASCOMPILE = $(LIBTOOL) $(AM_V_lt) $(AM_LIBTOOLFLAGS) \
+ $(LIBTOOLFLAGS) --mode=compile $(CCAS) $(AM_CCASFLAGS) \
+ $(CCASFLAGS)
+AM_V_CCAS = $(am__v_CCAS_@AM_V@)
+am__v_CCAS_ = $(am__v_CCAS_@AM_DEFAULT_V@)
+am__v_CCAS_0 = @echo " CCAS " $@;
+am__v_CCAS_1 =
SOURCES = $(libffts_la_SOURCES)
DIST_SOURCES = $(am__libffts_la_SOURCES_DIST)
am__can_run_installinfo = \
@@ -150,11 +208,29 @@ am__can_run_installinfo = \
*) (install-info --version) >/dev/null 2>&1;; \
esac
HEADERS = $(libffts_include_HEADERS)
+am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
+# Read a list of newline-separated strings from the standard input,
+# and print each of them once, without duplicates. Input order is
+# *not* preserved.
+am__uniquify_input = $(AWK) '\
+ BEGIN { nonempty = 0; } \
+ { items[$$0] = 1; nonempty = 1; } \
+ END { if (nonempty) { for (i in items) print i; }; } \
+'
+# Make sure the list of sources is unique. This is necessary because,
+# e.g., the same source file might be shared among _SOURCES variables
+# for different programs/libraries.
+am__define_uniq_tagged_files = \
+ list='$(am__tagged_files)'; \
+ unique=`for i in $$list; do \
+ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+ done | $(am__uniquify_input)`
ETAGS = etags
CTAGS = ctags
DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
ACLOCAL = @ACLOCAL@
AMTAR = @AMTAR@
+AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
AR = @AR@
AUTOCONF = @AUTOCONF@
AUTOHEADER = @AUTOHEADER@
@@ -328,6 +404,7 @@ $(top_srcdir)/configure: $(am__configure_deps)
$(ACLOCAL_M4): $(am__aclocal_m4_deps)
cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
$(am__aclocal_m4_deps):
+
install-libLTLIBRARIES: $(lib_LTLIBRARIES)
@$(NORMAL_INSTALL)
@list='$(lib_LTLIBRARIES)'; test -n "$(libdir)" || list=; \
@@ -362,8 +439,9 @@ clean-libLTLIBRARIES:
echo rm -f $${locs}; \
rm -f $${locs}; \
}
+
libffts.la: $(libffts_la_OBJECTS) $(libffts_la_DEPENDENCIES) $(EXTRA_libffts_la_DEPENDENCIES)
- $(LINK) -rpath $(libdir) $(libffts_la_OBJECTS) $(libffts_la_LIBADD) $(LIBS)
+ $(AM_V_CCLD)$(LINK) -rpath $(libdir) $(libffts_la_OBJECTS) $(libffts_la_LIBADD) $(LIBS)
mostlyclean-compile:
-rm -f *.$(OBJEXT)
@@ -381,34 +459,34 @@ distclean-compile:
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/patterns.Plo@am__quote@
.c.o:
-@am__fastdepCC_TRUE@ $(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $<
-@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po
-@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
+@am__fastdepCC_TRUE@ $(AM_V_CC)$(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $<
+@am__fastdepCC_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ $(AM_V_CC)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCC_FALSE@ $(COMPILE) -c $<
+@am__fastdepCC_FALSE@ $(AM_V_CC@am__nodep@)$(COMPILE) -c -o $@ $<
.c.obj:
-@am__fastdepCC_TRUE@ $(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'`
-@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po
-@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
+@am__fastdepCC_TRUE@ $(AM_V_CC)$(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'`
+@am__fastdepCC_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ $(AM_V_CC)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCC_FALSE@ $(COMPILE) -c `$(CYGPATH_W) '$<'`
+@am__fastdepCC_FALSE@ $(AM_V_CC@am__nodep@)$(COMPILE) -c -o $@ `$(CYGPATH_W) '$<'`
.c.lo:
-@am__fastdepCC_TRUE@ $(LTCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $<
-@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo
-@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@
+@am__fastdepCC_TRUE@ $(AM_V_CC)$(LTCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $<
+@am__fastdepCC_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ $(AM_V_CC)source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@
@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCC_FALSE@ $(LTCOMPILE) -c -o $@ $<
+@am__fastdepCC_FALSE@ $(AM_V_CC@am__nodep@)$(LTCOMPILE) -c -o $@ $<
.s.o:
- $(CCASCOMPILE) -c -o $@ $<
+ $(AM_V_CCAS)$(CCASCOMPILE) -c -o $@ $<
.s.obj:
- $(CCASCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'`
+ $(AM_V_CCAS)$(CCASCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'`
.s.lo:
- $(LTCCASCOMPILE) -c -o $@ $<
+ $(AM_V_CCAS)$(LTCCASCOMPILE) -c -o $@ $<
mostlyclean-libtool:
-rm -f *.lo
@@ -437,26 +515,15 @@ uninstall-libffts_includeHEADERS:
files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
dir='$(DESTDIR)$(libffts_includedir)'; $(am__uninstall_files_from_dir)
-ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES)
- list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \
- unique=`for i in $$list; do \
- if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
- done | \
- $(AWK) '{ files[$$0] = 1; nonempty = 1; } \
- END { if (nonempty) { for (i in files) print i; }; }'`; \
- mkid -fID $$unique
-tags: TAGS
-
-TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \
- $(TAGS_FILES) $(LISP)
+ID: $(am__tagged_files)
+ $(am__define_uniq_tagged_files); mkid -fID $$unique
+tags: tags-am
+TAGS: tags
+
+tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files)
set x; \
here=`pwd`; \
- list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \
- unique=`for i in $$list; do \
- if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
- done | \
- $(AWK) '{ files[$$0] = 1; nonempty = 1; } \
- END { if (nonempty) { for (i in files) print i; }; }'`; \
+ $(am__define_uniq_tagged_files); \
shift; \
if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \
test -n "$$unique" || unique=$$empty_fix; \
@@ -468,15 +535,11 @@ TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \
$$unique; \
fi; \
fi
-ctags: CTAGS
-CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \
- $(TAGS_FILES) $(LISP)
- list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \
- unique=`for i in $$list; do \
- if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
- done | \
- $(AWK) '{ files[$$0] = 1; nonempty = 1; } \
- END { if (nonempty) { for (i in files) print i; }; }'`; \
+ctags: ctags-am
+
+CTAGS: ctags
+ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files)
+ $(am__define_uniq_tagged_files); \
test -z "$(CTAGS_ARGS)$$unique" \
|| $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \
$$unique
@@ -485,9 +548,10 @@ GTAGS:
here=`$(am__cd) $(top_builddir) && pwd` \
&& $(am__cd) $(top_srcdir) \
&& gtags -i $(GTAGS_ARGS) "$$here"
+cscopelist: cscopelist-am
-cscopelist: $(HEADERS) $(SOURCES) $(LISP)
- list='$(SOURCES) $(HEADERS) $(LISP)'; \
+cscopelist-am: $(am__tagged_files)
+ list='$(am__tagged_files)'; \
case "$(srcdir)" in \
[\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \
*) sdir=$(subdir)/$(srcdir) ;; \
@@ -644,20 +708,20 @@ uninstall-am: uninstall-libLTLIBRARIES \
.MAKE: install-am install-strip
-.PHONY: CTAGS GTAGS all all-am check check-am clean clean-generic \
- clean-libLTLIBRARIES clean-libtool cscopelist ctags distclean \
- distclean-compile distclean-generic distclean-libtool \
- distclean-tags distdir dvi dvi-am html html-am info info-am \
- install install-am install-data install-data-am install-dvi \
- install-dvi-am install-exec install-exec-am install-html \
- install-html-am install-info install-info-am \
- install-libLTLIBRARIES install-libffts_includeHEADERS \
- install-man install-pdf install-pdf-am install-ps \
- install-ps-am install-strip installcheck installcheck-am \
- installdirs maintainer-clean maintainer-clean-generic \
- mostlyclean mostlyclean-compile mostlyclean-generic \
- mostlyclean-libtool pdf pdf-am ps ps-am tags uninstall \
- uninstall-am uninstall-libLTLIBRARIES \
+.PHONY: CTAGS GTAGS TAGS all all-am check check-am clean clean-generic \
+ clean-libLTLIBRARIES clean-libtool cscopelist-am ctags \
+ ctags-am distclean distclean-compile distclean-generic \
+ distclean-libtool distclean-tags distdir dvi dvi-am html \
+ html-am info info-am install install-am install-data \
+ install-data-am install-dvi install-dvi-am install-exec \
+ install-exec-am install-html install-html-am install-info \
+ install-info-am install-libLTLIBRARIES \
+ install-libffts_includeHEADERS install-man install-pdf \
+ install-pdf-am install-ps install-ps-am install-strip \
+ installcheck installcheck-am installdirs maintainer-clean \
+ maintainer-clean-generic mostlyclean mostlyclean-compile \
+ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \
+ tags tags-am uninstall uninstall-am uninstall-libLTLIBRARIES \
uninstall-libffts_includeHEADERS
diff --git a/lib/ffts/src/arch/.gitignore b/lib/ffts/src/arch/.gitignore
new file mode 100644
index 0000000..16c9840
--- /dev/null
+++ b/lib/ffts/src/arch/.gitignore
@@ -0,0 +1,6 @@
+/Makefile
+/Makefile.in
+/.deps
+/.libs
+/*.la
+/*.lo
diff --git a/lib/ffts/src/arch/ChangeLog b/lib/ffts/src/arch/ChangeLog
new file mode 100644
index 0000000..c42aa63
--- /dev/null
+++ b/lib/ffts/src/arch/ChangeLog
@@ -0,0 +1,4805 @@
+commit e8fa461503cf681fd7f6fffdbe94346cb4a0b94f
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Sat Sep 13 13:56:18 2014 -0400
+
+ [runtime] Remove an unused interpreter file.
+
+commit b8e69265771d2d730847add35620628ff003aed1
+Author: Rodrigo Kumpera <kumpera@gmail.com>
+Date: Tue Sep 9 09:14:37 2014 -0400
+
+ [cleanup] Remove more old files.
+
+commit 69d89956fcc24cec955246588269cb7c8012b7cb
+Author: Rodrigo Kumpera <kumpera@gmail.com>
+Date: Mon Sep 1 13:25:07 2014 -0400
+
+ [runtime] Remove the interpreter.
+
+commit a9db0d5b41d17cb7ff5788a63ce0eee1e01652b3
+Author: Neale Ferguson <neale@sinenomine.net>
+Date: Tue Jun 3 11:52:00 2014 -0400
+
+ Architectural level set to z10 instruction set
+
+commit edeeadda807c9189ad6b7cdd0f221c355ad95e52
+Author: Alex Rønne Petersen <alexrp@xamarin.com>
+Date: Tue Apr 29 16:56:12 2014 +0200
+
+ Add .gitignore file in mono/arch/arm64.
+
+commit 62b813772cfa4af873a278c39dd1f01dc6e50c2e
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Sat Apr 19 20:16:47 2014 +0200
+
+ [arm64] Add JIT support.
+
+commit 1d58ec09524d6f4ce37f39698e68fb45a3c0231b
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Sat Apr 19 17:03:21 2014 +0200
+
+ [arm64] Add basic port infrastructure.
+
+commit 12741090edd2230bfd0fac498af3e304680380b4
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Tue Apr 1 18:39:05 2014 +0000
+
+ [jit] Implement support for atomic intrinsics on arm.
+
+commit 21ca1bad7d0447bb5d420a58128e1c2733635efa
+Author: Rodrigo Kumpera <kumpera@gmail.com>
+Date: Wed Dec 11 11:13:14 2013 -0500
+
+ [arch] Add cvtsi2ss to amd64 codegen.
+
+commit 4a25d5fa1811be15c62979993cd1a37c2891d0a5
+Author: Alex Rønne Petersen <alexrp@xamarin.com>
+Date: Sat Nov 23 18:26:55 2013 +0100
+
+ Fix the encoding of x86_imul_reg_mem_imm.
+
+commit 43b05e3c36d05526f7a9f3f8767569d026e4f1c6
+Author: Alex Rønne Petersen <alexrp@xamarin.com>
+Date: Fri Nov 15 15:08:06 2013 +0100
+
+ Fix the `nop` opcode on some MIPS-based Loongson CPUs.
+
+ After much trouble building Mono in Debian/MIPS, @directhex
+ narrowed it down to this issue:
+
+ https://sourceware.org/ml/binutils/2009-11/msg00387.html
+
+ So since some of the 2E and 2F versions of the Loongson CPUs
+ break with a regular `sll zero, zero, 0` we need to issue an
+ `or at, at, 0`. This makes sure we don't randomly deadlock or
+ blow up when the CPU is under heavy load.
+
+ Yes, really.
+
+commit 2f56d471f089b8f514377ce501a0c1643652d639
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Fri May 24 23:41:39 2013 +0200
+
+ Merge some Nacl/ARM changes from https://github.com/igotti-google/mono/commit/65d8d68e8c81cf6adb1076de7a9425c84cab86a3.
+
+commit ab6a96ef346220433f9f7967b763a0453d9cbc66
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Tue May 14 18:27:32 2013 +0200
+
+ Enable hw division/remainder on mt in non-thumb mode as well.
+
+commit 78c1e65942210449d0d1c4957b42242ebc9bdb5a
+Author: Alex Rønne Petersen <alexrp@xamarin.com>
+Date: Tue May 14 03:10:43 2013 +0200
+
+ Kill support for the ancient FPA format on ARM.
+
+commit a42bc8f14a3393150fb6fbb772c2b0259267f5ae
+Author: Neale Ferguson <neale@sinenomine.net>
+Date: Thu Apr 25 10:01:14 2013 -0400
+
+ Add lazy rgctx support to s390x
+
+commit 92b3dc346aad94e7e6a91e7356adcebbb180c618
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Mon Apr 22 17:54:27 2013 +0200
+
+ Remove obsolete 32 bit s390 backend.
+
+commit 0d9d79945bfc7e791ed39e7519b8769a3c09fe28
+Author: Elijah Taylor <elijahtaylor@google.com>
+Date: Thu Jan 31 12:48:49 2013 -0800
+
+ NaCl GC improvements
+
+ - inline managed code implementation
+ (add x86 test mem imm8 codegen macro for this as well)
+ - clean up libgc NaCl code
+ - centralize mono_nacl_gc into mini.c
+
+commit a2b380c30f8e12e508d9b761b9b049d17dff3617
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Fri Mar 1 20:27:07 2013 +0100
+
+ Remove the unmaintained and incomplete alpha backend.
+
+commit ddee8bb5125ad07f673a5f9a45ddc629dec8c126
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Tue Feb 26 22:08:26 2013 +0100
+
+ Remove the unmaintained and incomplete hppa backend.
+
+commit 9c434db79ba98565a8dadcfbbe8737621a698589
+Author: Rodrigo Kumpera <kumpera@gmail.com>
+Date: Tue Oct 9 17:23:38 2012 -0400
+
+ Use full path for includes as this was breaking the cross compiler.
+
+commit 600580c96563f5702acee5a0307432e96731d837
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Thu Oct 4 13:03:06 2012 +0200
+
+ Save fp registers in the ARM throw trampoline, ios has callee saved fp registers, and LLVM generates code which uses them.
+
+commit 0b64268e0a56e3f76063f0b679975be0daaf68b1
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Wed Oct 3 10:26:37 2012 +0200
+
+ Use AM_CPPFLAGS instead of INCLUDES in Makefile.am files, as the later is no longer supported, see http://lists.gnu.org/archive/html/automake/2012-08/msg00087.html.
+
+commit f2e43c392dde726d2f1008dfcc8515d34354e968
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Wed Sep 19 01:37:26 2012 +0000
+
+ Save/restore fp registers in MonoContext on ios. Fixes #1949.
+
+commit a841c76b86e38fc8e5db24f152b5fab2501ddf1a
+Author: Iain Lane <iain@orangesquash.org.uk>
+Date: Sun Apr 15 14:49:55 2012 +0100
+
+ Fix ARM printf format problems
+
+ When building with -Werror=format-security on ARM, mono fails to build
+ due to incorrect format strings in arm-dis.c
+
+commit 33426abe6bd7ad8eb37d2f214afe08a0a3d70a0b
+Author: Neale Ferguson <neale@sinenomine.net>
+Date: Mon Apr 2 13:30:43 2012 -0400
+
+ s390x-codegen.h - Define s390_SP and s390_BP
+ sgen-major-copy-object.h - Correct assertion test
+ sgen-os-posix.c - Prevent race condition between restarting and suspending a thread
+
+commit c565eab0f9d79f6009c3878eaa190529838b0204
+Author: Miguel de Icaza <miguel@gnome.org>
+Date: Mon Mar 12 16:15:46 2012 -0400
+
+ Update some copyrights
+
+commit d711efe0d6403fa49697c304696843a789805112
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Fri Dec 2 06:20:16 2011 +0000
+
+ Ongoing MIPS work. Fix mips_load () to be patchable, fix endianness issue in OP_MIPS_MFC1D, fix OP_JMP. make rcheck runs now.
+
+commit 32a164a381080aee3afa42ea33e31d89579519a4
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Wed Nov 16 04:35:31 2011 -0500
+
+ Revert "Add support for hardfp abi on ARM."
+
+ This reverts commit e7055b45b9211fb20021997f7da0fa24992421f5.
+
+commit aaae806b8bd16a82937c9417689aeb82bea0b952
+Author: Miguel de Icaza <miguel@gnome.org>
+Date: Wed Nov 9 10:25:48 2011 -0500
+
+ Update two days worth of copyrights, many more missing
+
+commit 96e5ba7724999828facefb30e0982d0be6931bda
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Wed Nov 9 01:13:16 2011 +0100
+
+ Add support for hardfp abi on ARM.
+
+commit c6d53e16991eb2dcc3e4d99a008fdd899d2b78f2
+Author: Elijah Taylor <elijahtaylor@google.com>
+Date: Fri Aug 5 17:02:45 2011 +0200
+
+ Fix up bugs in x86-codegen for NaCl.
+
+commit 8034d4b8f49485babcbffd12d3e09fd372c00ccb
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Wed Jul 6 16:16:16 2011 +0200
+
+ Prefix ARM FPA codegen macros with 'FPA'.
+
+commit d2a95b8feb24584dd528b3deb0f5f1ec5d7766a3
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Thu Jun 23 21:33:43 2011 +0200
+
+ Fix out-of-tree builds on arm.
+
+commit d093f6fff2bcaa4ccfc795354b151c7ca1a0c613
+Author: Neale Ferguson <neale@sinenomine.net>
+Date: Fri May 6 12:52:19 2011 -0400
+
+ Implement soft debugger for s390x and fix context macro for s390x
+
+commit 4c9723aa3efac03bc33deed252ebda71cbb1ae86
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Tue Mar 8 12:14:52 2011 +0100
+
+ Fix some warnings.
+
+commit b1a613aca13e03185d0ba49e46fd77fd8eb98fc9
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Sun Feb 20 03:22:52 2011 +0100
+
+ Implement mono_memory_barrier () and OP_MEMORY_BARRIER for ARM.
+
+commit f81e3005a53a10c39f4ca8dd30a2a88719c7d005
+Author: Neale Ferguson <neale@sinenomine.net>
+Date: Sun Jan 16 23:40:23 2011 -0500
+
+ Cast result of s390x_emit16/32 to eliminate lots of warning messages
+ Check for wrapper-managed-to-native when assessing call parameters and have emit_prolog use native_size when processing those parameters
+ Signed-off-by: Neale Ferguson <neale@sinenomine.net>
+
+commit 92a55ae009739b5ec652676b8fdd615375c27fc0
+Author: Rodrigo Kumpera <kumpera@gmail.com>
+Date: Mon Jan 10 10:52:46 2011 +0000
+
+ Implement mono.simd new conversion ops on amd64
+
+commit b7639e01d7603a1e34dd225edb5e99fd2181494b
+Author: Rodrigo Kumpera <kumpera@gmail.com>
+Date: Mon Jan 10 10:40:12 2011 +0100
+
+ Implement a few conversion operations.
+
+ Add conversion operations between 4f, 2d and 4i.
+ Implemented only on x86 for now.
+
+commit f0e5c2be6946491ba052c82794361ec0d33cb04c
+Author: Rodrigo Kumpera <kumpera@gmail.com>
+Date: Fri Jan 7 00:19:03 2011 +0000
+
+ AMD64 version of the new mono.simd ops
+
+commit 1aa6254fb828e043ea55d7d3e37b02812e2d9bdf
+Author: Rodrigo Kumpera <kumpera@gmail.com>
+Date: Thu Jan 6 21:36:31 2011 +0100
+
+ Implement Shuffle for 64bits types.
+
+ * x86-codegen.h: Add macro and define to emit pshufpd.
+
+ * mini-ops.h: Add OP_SHUPD.
+
+ * cpu-x86.md:
+ * mini-x86.h: Implement x86 support.
+
+ * simd-intrinsics.c: Handle shuffle on 64bit types.
+
+ * VectorOperations.cs: Add new methods.
+
+commit c1fb94e7e72e58924dcebe8cdfcdbcbe1e65b644
+Author: Rodrigo Kumpera <kumpera@gmail.com>
+Date: Thu Jan 6 18:43:59 2011 +0100
+
+ Add SHUFPS and macro to emit it.
+
+commit 48f5efeb334eb4b6e867c65ae53e21b3c45fd771
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Thu Jan 6 19:35:45 2011 +0100
+
+ Put back a macro definition accidentally removed by the nacl changes.
+
+commit a7074ea55af096913e4bcc8e044be7601bcc55b5
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Thu Jan 6 11:49:32 2011 +0100
+
+ Fix warnings introduced by the NACL merge.
+
+commit 4edb45273377cc0858dab7e12b19026467e796c5
+Author: Elijah Taylor <elijahtaylor@google.com>
+Date: Tue Dec 14 16:03:45 2010 -0800
+
+ Merge mono/io-layer, mono/metadata, mono/arch/x86 and configure.in for Native Client
+
+commit cfdf246cd2ffd65bd25e09f1d66bb55d57bf8953
+Author: Elijah Taylor <elijahtaylor@google.com>
+Date: Tue Dec 14 14:37:36 2010 -0800
+
+ Changes to mono/arch/amd64 for Native Client
+
+commit aa974c33a3cee416fc456053164835acbf81df70
+Author: Rodrigo Kumpera <kumpera@gmail.com>
+Date: Fri Sep 24 11:28:46 2010 -0300
+
+ Implement amd64 support for OP_CARDTABLE.
+
+ * amd64-codegen.h (amd64_alu_reg_membase_size): Add support
+ for RIP based addressing.
+
+ * cpu-amd64.md: Add card_table_wbarrier.
+
+ * mini-amd64.c (mono_arch_output_basic_block): Emit the
+ new OP.
+
+ * mini-amd64.c (mono_arch_emit_exceptions): Handle another
+ kind of patch-info - GC_CARD_TABLE_ADDR. This is required
+ because we can neither have 64-bit immediates with amd64
+ nor 2 scratch registers with the current regalloc.
+
+ * mini-amd64.h: Define MONO_ARCH_HAVE_CARD_TABLE_WBARRIER.
+
+commit 7981b77489eba9fafe98b764ae8c423143e55a25
+Author: Mark Mason <mmason@upwardaccess.com>
+Date: Wed Aug 18 23:39:36 2010 +0800
+
+ Simplify test for MIPS imm16 operands.
+
+ Code contributed under the MIT/X11 license.
+
+commit 881a8fe8dfebf42e0f50228319132001d121c983
+Author: Elijah Taylor <elijahtaylor@google.com>
+Date: Mon Aug 9 17:40:18 2010 +0200
+
+ Add hooks to the codegen macros to support NACL codegen.
+
+commit da52cebbb28392e8043a36e8c29f4ceb4f706741
+Author: Raja R Harinath <harinath@hurrynot.org>
+Date: Sun Jul 25 20:09:25 2010 +0530
+
+ EOL handling
+
+ This set of .gitattributes was automatically generated from the list of files
+ that GIT tried to normalize when I enabled automatic EOL conversion.
+
+ With this set of attributes, we prevent automated EOL conversion on files that
+ we know will cause trouble down the road.
+
+commit 80806328ee52ed52783e005f044e8447d34efac5
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Wed May 19 02:35:46 2010 +0000
+
+ 2010-05-19 Zoltan Varga <vargaz@gmail.com>
+
+ * ppc/ppc-codegen.h (ppc_load_func): Fix ilp32 support.
+
+ svn path=/trunk/mono/; revision=157521
+
+commit bb66b04f8ca017660ae65afa4b86a33b32d48cdb
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Thu Apr 8 04:41:44 2010 +0000
+
+ .gitignore
+
+ svn path=/trunk/mono/; revision=155025
+
+commit 2b562993a3dced62eb48aeedcf38f234b655e86f
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Mon Mar 29 23:21:23 2010 +0000
+
+ 2010-03-30 Zoltan Varga <vargaz@gmail.com>
+
+ * arm/*.sh: Remove bash dependency.
+
+ svn path=/trunk/mono/; revision=154407
+
+commit 977db7f5b92aa4e7b8909f6d2440f3347e548364
+Author: Neale Ferguson <neale@mono-cvs.ximian.com>
+Date: Tue Mar 23 20:00:46 2010 +0000
+
+ Primarily, add support for mono_arch_get_throw_corlib_exception and IMT
+ for s390x. Other s390x fixes to instruction sizes, parameter passing, and ARCH
+ settings.
+
+
+ svn path=/trunk/mono/; revision=154085
+
+commit 282ce11cd7691698334563b95ca4b49e6c32f900
+Author: Gonzalo Paniagua Javier <gonzalo.mono@gmail.com>
+Date: Fri Nov 20 22:34:30 2009 +0000
+
+ removing PLATFORM_WIN32
+
+ svn path=/trunk/mono/; revision=146652
+
+commit 774d55350115d1c4f08dc2a9b015e9502d796cef
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Tue Nov 10 00:58:49 2009 +0000
+
+ 2009-11-10 Zoltan Varga <vargaz@gmail.com>
+
+ * arm/arm-codegen.h: Fix the names of the LDMIA/STMIA macros, they don't actually
+ update the base register.
+
+ svn path=/trunk/mono/; revision=145786
+
+commit 568b4a7ab726e87c664a682193fa57c5521ed23c
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Fri Aug 14 13:49:01 2009 +0000
+
+ 2009-08-14 Zoltan Varga <vargaz@gmail.com>
+
+ * arm/arm-codegen.h: Add armv6 MOVW/MOVT.
+
+ svn path=/trunk/mono/; revision=139918
+
+commit c4d98f3131b6b7d0732050c2e0ac7bd05b6c27c2
+Author: Jerri Maine <crashfourit@gmail.com>
+Date: Tue Aug 4 00:31:14 2009 +0000
+
+ Contributed under the terms of the MIT/X11 license by
+ Jerry Maine <crashfourit@gail.com>.
+
+ * mono/arch/amd64/amd64-codegen.h: Added missing code gen macro for single packed square root.
+ * mono/mini/basic-simd.cs: added test for packed double square root.
+ * mono/mini/cpu-amd64.md: added opcode info for packed double square root.
+ * mono/mini/cpu-x86.md: added opcode info for packed double square root.
+ * mono/mini/mini-ops.h: added IR opcode for packed double square root.
+ * mono/mini/mini-x86.c: added IR to native translation code for packed double square root.
+ * mono/mini/mini-amd64.c: removed todo for packed double square root.
+ * mono/mini/simd-intrinsics.c: added method to IR opcode conversion for
+ packed double square root.
+
+ svn path=/trunk/mono/; revision=139309
+
+commit fc5d2d293fe800d860e9af4fcd9b19f9be7d4e17
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Fri Jul 24 15:00:25 2009 +0000
+
+ Fri Jul 24 16:54:13 CEST 2009 Steven Munroe <munroesj@us.ibm.com>
+
+ This patch is contributed under the terms of the MIT/X11 license
+
+ * arch/ppc/ppc-codegen.h (ppc_ha): Define high adjusted
+ conversion to support combining addis for bits 32-47 with
+ signed load/store displacements for bits 48-63.
+ (ppc_fcfidx, ppc_fctidx, ppc_fctidzx): Share with PPC32.
+ These instructions are available to 32-bit programs on 64-bit
+ hardware and 32-bit both starting with PowerISA V2.01.
+ [__mono_ppc64__]: Define ppc_mftgpr and ppc_mffgpr for Power6
+ native mode.
+ [!__mono_ppc64__]: Define ppc_is_imm32 as constant true for
+ ppc32.
+
+
+ svn path=/trunk/mono/; revision=138635
+
+commit f44bc9e40cc840bf63bf782aa0338aae3e898f7f
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Mon Jul 20 20:45:49 2009 +0000
+
+ 2009-07-20 Zoltan Varga <vargaz@gmail.com>
+
+ * amd64/amd64-codegen.h (amd64_sse_pminud_reg_reg): Fix the encoding
+ of this instruction.
+
+ svn path=/trunk/mono/; revision=138242
+
+commit 88ccf5c589b23d6e79ea5a588d3986693b09879a
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Mon Jul 13 21:58:58 2009 +0000
+
+ 2009-07-13 Zoltan Varga <vargaz@gmail.com>
+
+ * x86/x86-codegen.h: Applied patch from Marian Salaj <salo3@atlas.cz>.
+ Fix encoding of PMINSW and PMINSD. Fixes #521662.
+
+ svn path=/trunk/mono/; revision=137821
+
+commit 64d366eddf3b1c93bcaaff2190fa1cc2b01f7f03
+Author: Jerri Maine <crashfourit@gmail.com>
+Date: Fri Jul 10 22:35:07 2009 +0000
+
+ Contributed under the terms of the MIT/X11 license by
+ Jerry Maine <crashfourit@gail.com>.
+
+ * amd64/amd64-codegen.h: Add macros for coding several specific sse opcodes.
+ * amd64/amd64-codegen.h: Fix bugs in simd macros.
+
+ svn path=/trunk/mono/; revision=137736
+
+commit d7fa5cedae9e4859b340ee29e997dfd48b45ce6e
+Author: Jerri Maine <crashfourit@gmail.com>
+Date: Wed Jun 24 21:25:11 2009 +0000
+
+ Fix wrong date in my entry to ChangeLog files. Sorry! :((
+
+ svn path=/trunk/mono/; revision=136786
+
+commit 1c634ebda21ddf5392c9d8edd030323d1ad85962
+Author: Jerri Maine <crashfourit@gmail.com>
+Date: Wed Jun 24 21:19:29 2009 +0000
+
+ mini-amd64.c: Added code to convert simd IR to native amd64 sse.
+ amd64/amd64-codegen.h: Add macros for coding several specific sse opcodes.
+
+ svn path=/trunk/mono/; revision=136785
+
+commit bb994071dcc42ba150d88776fe70f8d35fc522a9
+Author: Neale Ferguson <neale@mono-cvs.ximian.com>
+Date: Tue Jun 23 23:55:26 2009 +0000
+
+ Fix LCONV_TO_xx and ICONV_TO_xx. Fix leave_method dump of returned
+ structure. Fix formatting.
+ Correct instruction lengths.
+ Add new instructions.
+
+ svn path=/trunk/mono/; revision=136748
+
+commit f48a4f5a13745caf5350d6f190efb97ec6b605ef
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Mon Jun 22 15:25:02 2009 +0000
+
+ Fix a few uses of ppc_store_reg.
+
+ svn path=/trunk/mono/; revision=136607
+
+commit 4ecc9d712b82d78c853e574edc0345c85bfcd660
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Mon Jun 22 15:24:56 2009 +0000
+
+ Fix a few uses of ppc_load_reg/ppc_store_reg.
+
+ svn path=/trunk/mono/; revision=136606
+
+commit 40c668ecb1553ffb7b6575b439b3ff8420265cd8
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Mon Jun 22 15:22:10 2009 +0000
+
+ 2009-06-22 Zoltan Varga <vargaz@gmail.com>
+
+ * ppc/ppc-codegen.h: Rework the naming of the load/store macros,
+ ldr/str now handle register sized quantities, while ldptr/stptr handle
+ pointer sized quantities.
+
+ svn path=/trunk/mono/; revision=136604
+
+commit cf0e113f7dd91ff8b46e35047cc48c2e5ece925c
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Sat Jun 20 18:47:03 2009 +0000
+
+ 2009-06-20 Zoltan Varga <vargaz@gmail.com>
+
+ * ppc/ppc-codegen.h: Fix the last change to avoid self-assignments inside
+ macros.
+
+ svn path=/trunk/mono/; revision=136548
+
+commit 3858973d0bd980206ea3725a2e74f2a336aa1aa1
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Sat Jun 20 13:04:42 2009 +0000
+
+ 2009-06-20 Zoltan Varga <vargaz@gmail.com>
+
+ * ppc/ppc-codegen.h: Add ppc_ldr/ppc_str macros to store regsize quantities.
+ Handle little endian host platforms in ppc_emit32.
+
+ svn path=/trunk/mono/; revision=136539
+
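+ A plausible shape of the little-endian part of this change (an
+ assumption, not the verbatim fix): emitted instruction words get
+ byte-swapped on LE hosts so the stream stays valid big-endian PPC.
+
+     #include <stdint.h>
+     #include <string.h>
+
+     static uint8_t *my_ppc_emit32 (uint8_t *code, uint32_t ins)
+     {
+     #if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+             ins = __builtin_bswap32 (ins); /* keep the stream big-endian */
+     #endif
+             memcpy (code, &ins, 4);
+             return code + 4;
+     }
+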
+commit 9629536810d07a63b980a29912eaf3df7313fee9
+Author: Jerri Maine <crashfourit@gmail.com>
+Date: Fri Jun 12 17:33:11 2009 +0000
+
+ Add macros for coding two byte SIMD/SSE opcodes. Added comments to help tell the different types of SSE code gen macros apart.
+
+ svn path=/trunk/mono/; revision=136018
+
+commit 76cddabf0319c7be9fae2b6c532aafe6587fafbc
+Author: Miguel de Icaza <miguel@gnome.org>
+Date: Wed Apr 22 23:59:10 2009 +0000
+
+ merge
+
+ svn path=/trunk/mono/; revision=132427
+
+commit 965b554666f2999b9e01dd731b1134af1cfcd5fa
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Mon Apr 6 15:09:57 2009 +0000
+
+ 2009-04-06 Zoltan Varga <vargaz@gmail.com>
+
+ * arm/arm-vfp-codegen.h: Add ARM_FSITOS/ARM_FSITOD.
+
+ svn path=/trunk/mono/; revision=131125
+
+commit 7b7235494cabe7c5a796fafd6297070f993b03a8
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Thu Apr 2 22:37:35 2009 +0000
+
+ 2009-04-03 Zoltan Varga <vargaz@gmail.com>
+
+ * amd64/amd64-codegen.h: Add macros for decoding the SIB byte.
+
+ svn path=/trunk/mono/; revision=130910
+
+commit 9f497af70ef5ed9244ffbe9a6263f7d077136148
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Thu Apr 2 00:50:47 2009 +0000
+
+ 2009-04-02 Zoltan Varga <vargaz@gmail.com>
+
+ * arm/arm-vfp-codegen.h: Add missing VFP codegen macros.
+
+ svn path=/trunk/mono/; revision=130817
+
+commit 7c682141c5861685e5b0efdcc1f337083657cf9d
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Fri Mar 6 15:55:12 2009 +0000
+
+ 2009-03-06 Zoltan Varga <vargaz@gmail.com>
+
+ * arm/tramp.c: Include a change from the debian patches. Avoid #include-ing
+ a file in the middle of a function.
+
+ svn path=/trunk/mono/; revision=128782
+
+commit a7f6dd7620d7c440216c0f156bcd969a28a592d4
+Author: Martin Baulig <martin@novell.com>
+Date: Sat Feb 28 14:36:50 2009 +0000
+
+ Create .gitignore's.
+
+ svn path=/trunk/mono/; revision=128265
+
+commit 22e6e9728faa11a87a7f6f0f0ff0f0f8ef754c03
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Fri Feb 27 06:21:52 2009 +0000
+
+ 2009-02-27 Zoltan Varga <vargaz@gmail.com>
+
+ * arm/{arm_fpamacros.h, arm_vfpmacros.h}: Remove these files, they are
+ autogenerated.
+
+ svn path=/trunk/mono/; revision=128179
+
+commit c70f15fc12afeb73f19d4ff18cf11b7289d76c4f
+Author: Mark Probst <mark.probst@gmail.com>
+Date: Mon Feb 2 23:32:58 2009 +0000
+
+ 2009-02-02 Mark Probst <mark.probst@gmail.com>
+
+ Contributed under the terms of the MIT/X11 license by Steven
+ Munroe <munroesj@us.ibm.com>.
+
+ * ppc/ppc-codegen.h: Make operand order and case consistent
+ (assembler order) for ppc_load_reg_update, ppc_load_multiple_regs,
+ ppc_store_multiple_regs, ppc_lwz, ppc_lhz, ppc_lbz,
+ ppc_stw,ppc_sth, ppc_stb, ppc_stwu, ppc_lbzu, ppc_lfdu, ppc_lfsu,
+ ppc_lfsux, ppc_lfsx, ppc_lha, ppc_lhau, ppc_lhzu, ppc_lmw,
+ ppc_lwzu, ppc_stbu, ppc_stfdu, ppc_stfsu, ppc_sthu, ppc_stmw. Use
+ "i" or "ui" instead of "d" for immediated operands to immediate
+ arthimetic and logical instructions in macros ppc_addi, ppc_addis,
+ ppc_ori, ppc_addic, ppc_addicd, ppc_andid, ppc_andisd.
+ [__mono_ppc64__]: Make operand order and case consistent
+ (assembler order) for ppc_load_multiple_regs,
+ ppc_store_multiple_regs. Simplify the DS form and make them
+ consistent with D forms for ppc_load_reg, ppc_load_reg_update,
+ ppc_store_reg, ppc_store_reg_update. ppc_ld, ppc_lwa, ppc_ldu,
+ ppc_std, ppc_stdu. Define ppc_lwax and ppc_lwaux.
+
+ 2009-02-02 Mark Probst <mark.probst@gmail.com>
+
+ Contributed under the terms of the MIT/X11 license by Steven
+ Munroe <munroesj@us.ibm.com>.
+
+ * exceptions-ppc.c (restore_regs_from_context): Correct operand
+ order (offset then base reg) for ppc_load_multiple_regs.
+ (emit_save_saved_regs) Correct operand order for
+ ppc_store_multiple_regs.
+ (mono_arch_get_call_filter): Correct operand order for
+ ppc_load_multiple_regs.
+
+ * mini-ppc.c (emit_memcpy): Fix operand order for
+ ppc_load_reg_update and ppc_store_reg_update.
+ (mono_arch_output_basic_block): Correct operand order for ppc_lha.
+ (mono_arch_emit_epilog): Correct operand order for
+ ppc_load_multiple_regs.
+
+ * tramp-ppc.c (mono_arch_create_trampoline_code): Correct operand
+ order for ppc_store_multiple_regs and ppc_load_multiple_regs.
+
+ svn path=/trunk/mono/; revision=125443
+
+commit f228d47d2afc549321cec800466e6bc1cde631bb
+Author: Rodrigo Kumpera <kumpera@gmail.com>
+Date: Mon Jan 19 19:47:54 2009 +0000
+
+ 2009-01-19 Rodrigo Kumpera <rkumpera@novell.com>
+
+ * x86/x86-codegen.h: Add x86_movd_xreg_membase.
+
+ svn path=/trunk/mono/; revision=123825
+
+commit 792160756d6ef76711408f151838c3f5a5f8d83b
+Author: Mark Probst <mark.probst@gmail.com>
+Date: Fri Dec 19 19:46:04 2008 +0000
+
+ 2008-12-19 Mark Probst <mark.probst@gmail.com>
+
+ * ppc/ppc-codegen.h: Fixed the argument order for lwzu in
+ ppc_load_reg_update.
+
+ svn path=/trunk/mono/; revision=121883
+
+commit 344a06253c9c1bad287e160b9714b0a052e68a09
+Author: Mark Mason <glowingpurple@gmail.com>
+Date: Sat Dec 13 06:54:25 2008 +0000
+
+ 2008-12-12 Mark Mason <mmason@upwardaccess.com>
+
+ * mips/mips-codegen.h: Changes to support n32.
+
+ Contributed under the MIT X11 license.
+
+ svn path=/trunk/mono/; revision=121488
+
+commit 2dcc1868b2e2e830a9fa84a445ee79a8f6ab38ba
+Author: Mark Probst <mark.probst@gmail.com>
+Date: Wed Dec 10 09:33:57 2008 +0000
+
+ 2008-12-10 Mark Probst <mark.probst@gmail.com>
+
+ * mini-ppc.c: Merged with mini-ppc64.c.
+
+ * mini-ppc.h: Define PPC_MINIMAL_PARAM_AREA_SIZE on all targets.
+
+ * Makefile.am: Use the same sources for PPC and PPC64.
+
+ * mini-ppc64.c: Removed.
+
+ 2008-12-10 Mark Probst <mark.probst@gmail.com>
+
+ * ppc/ppc-codegen.h: A few new macros for the final PPC/PPC64
+ merge.
+
+ svn path=/trunk/mono/; revision=121203
+
+commit 77eff8936b5e423be2712ba66cd8baba0edd2795
+Author: Mark Probst <mark.probst@gmail.com>
+Date: Fri Dec 5 20:57:02 2008 +0000
+
+ 2008-12-05 Mark Probst <mark.probst@gmail.com>
+
+ * mini-ppc.c: Some simple merges from mini-ppc64.c.
+
+ 2008-12-05 Mark Probst <mark.probst@gmail.com>
+
+ * ppc/ppc-codegen.h: ppc_load_func must use ppc_load_sequence.
+ Added ppc_compare_log.
+
+ svn path=/trunk/mono/; revision=120890
+
+commit dd397c9fd311f0411694ff1cc7904aec14f4551b
+Author: Mark Probst <mark.probst@gmail.com>
+Date: Fri Dec 5 16:42:24 2008 +0000
+
+ 2008-12-05 Mark Probst <mark.probst@gmail.com>
+
+ * tramp-ppc.c, mini-ppc.c, mini-ppc.h: Merged tramp-ppc.c with
+ tramp-ppc64.c.
+
+ * Makefile.am: Use tramp-ppc.c instead of tramp-ppc64.c.
+
+ * tramp-ppc64.c: Removed.
+
+ 2008-12-05 Mark Probst <mark.probst@gmail.com>
+
+ * ppc/ppc-codegen.h: Added ppc_load_func for PPC32. Added
+ ppc_load/store_multiple_regs and ppc_compare_reg_imm.
+
+ svn path=/trunk/mono/; revision=120852
+
+commit 7f226f68fb98684dafd132d90ca1a24635c33557
+Author: Mark Probst <mark.probst@gmail.com>
+Date: Tue Dec 2 16:03:45 2008 +0000
+
+ 2008-12-02 Mark Probst <mark.probst@gmail.com>
+
+ * tramp-ppc64.c (mono_arch_create_rgctx_lazy_fetch_trampoline):
+ Fix trampoline size.
+
+ * mini-ppc64.c, mini-ppc64.h, cpu-ppc64.md: A few floating point
+ conversion opcodes are implemented natively instead via emulation.
+
+ 2008-12-02 Mark Probst <mark.probst@gmail.com>
+
+ * ppc/ppc-codegen.h: Opcodes for floating point conversions from
+ 64 bit integers.
+
+ Code submitted by andreas.faerber@web.de at
+ https://bugzilla.novell.com/show_bug.cgi?id=324134 under the
+ X11/MIT license.
+
+ svn path=/trunk/mono/; revision=120492
+
+commit 742361c7bfc21faf8485d20d00cdfc58c04800f9
+Author: Mark Probst <mark.probst@gmail.com>
+Date: Fri Nov 28 19:06:34 2008 +0000
+
+ 2008-11-28 Mark Probst <mark.probst@gmail.com>
+
+ * mini-ppc64.c, mini-ppc64.h: Enable generalized IMT thunks and
+ make them work.
+
+ 2008-11-28 Mark Probst <mark.probst@gmail.com>
+
+ * object.c: Don't put function descriptors into generalized IMT
+ thunks.
+
+ 2008-11-28 Mark Probst <mark.probst@gmail.com>
+
+ * ppc/ppc-codegen.h: #define for the maximum length of a load
+ sequence.
+
+ svn path=/trunk/mono/; revision=120248
+
+commit b45b096d6d4246f16d05e42838122f1d58f875f6
+Author: Mark Probst <mark.probst@gmail.com>
+Date: Fri Nov 21 00:21:53 2008 +0000
+
+ 2008-11-21 Mark Probst <mark.probst@gmail.com>
+
+ * mini-ppc64.c, mini-ppc64.h, cpu-ppc64.md: Several fixes. Now
+ PPC64 passes basic-long.exe.
+
+ 2008-11-21 Mark Probst <mark.probst@gmail.com>
+
+ * ppc/ppc-codegen.h: Make ppc_is_[u]imm16() work with 64 bit
+ values.
+
+ svn path=/trunk/mono/; revision=119560
+
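+ The 64-bit fix amounts to widening before comparing; a minimal
+ sketch (names illustrative): without the 64-bit cast a value such
+ as 0x100001234 truncates and wrongly passes the range check.
+
+     #include <stdint.h>
+
+     #define my_is_imm16(v)  ((int64_t) (v) >= -32768 && (int64_t) (v) <= 32767)
+     #define my_is_uimm16(v) ((int64_t) (v) >= 0 && (int64_t) (v) <= 65535)
+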
+commit dc227de13e4f1cee33c379401adbb90a225e680a
+Author: Rodrigo Kumpera <kumpera@gmail.com>
+Date: Thu Nov 20 23:45:00 2008 +0000
+
+ 2008-11-20 Rodrigo Kumpera <rkumpera@novell.com>
+
+ * x86/x86-codegen.h: Add X86_SSE_MOVHPD_REG_MEMBASE and renamed MOVS to MOVSD.
+
+ svn path=/trunk/mono/; revision=119549
+
+commit 01e12b57e8773f9c65c64a91f956b0fa9335d095
+Author: Rodrigo Kumpera <kumpera@gmail.com>
+Date: Thu Nov 20 23:44:44 2008 +0000
+
+ 2008-11-20 Rodrigo Kumpera <rkumpera@novell.com>
+
+ * x86/x86-codegen.h: Add X86_SSE_MOVS_ reg/membase variants.
+
+ * x86/x86-codegen.h: Add x86_sse_alu_pd_reg_membase_imm.
+
+ * x86/x86-codegen.h: Sort the x86_sse_alu_* macros decently.
+
+ svn path=/trunk/mono/; revision=119545
+
+commit 96ed3f7c4ea51c61ec3b5d0600c32fa003b8e4f7
+Author: Mark Probst <mark.probst@gmail.com>
+Date: Thu Nov 20 21:36:13 2008 +0000
+
+ 2008-11-20 Mark Probst <mark.probst@gmail.com>
+
+ * decompose.c: Decompose carry and overflow add on PPC64 like on
+ other 64 bit archs. Don't decompose sub at all on PPC64.
+
+ * mini-ppc64.c, exceptions-ppc64.c, tramp-ppc64.c, cpu-ppc64.md:
+ Several fixes and new opcodes. Now PPC64 runs (but doesn't pass)
+ basic-long.exe.
+
+ 2008-11-20 Mark Probst <mark.probst@gmail.com>
+
+ * ppc/ppc-codegen.h: Use ppc_load_reg instead of ppc_ld in
+ ppc_load_func to fix the 2 bit shift.
+
+ svn path=/trunk/mono/; revision=119516
+
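+ Context for the "2 bit shift" (sketch, not the exact macro): ld is
+ a DS-form instruction, so the low two bits of its 16-bit displacement
+ field are the opcode extension, and the offset must be a multiple of 4.
+
+     #include <stdint.h>
+
+     #define MY_PPC_LD(rt, ds, ra) /* xo = 0; (ds) must be 4-aligned */ \
+             ((58u << 26) | ((uint32_t) (rt) << 21) | ((uint32_t) (ra) << 16) | \
+              ((uint32_t) (ds) & 0xFFFCu))
+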
+commit 14651d4fa6b039131000aa5157ed99b7526f89b8
+Author: Mark Probst <mark.probst@gmail.com>
+Date: Thu Nov 20 21:27:36 2008 +0000
+
+ 2008-11-20 Mark Probst <mark.probst@gmail.com>
+
+ * ppc/ppc-codegen.h: 64 bit division opcodes.
+
+ Code submitted by andreas.faerber@web.de at
+ https://bugzilla.novell.com/show_bug.cgi?id=324134 under the
+ X11/MIT license.
+
+ svn path=/trunk/mono/; revision=119515
+
+commit daa4af175e0f8b95888918dbf429c7d5f66d3c07
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Thu Nov 20 14:28:51 2008 +0000
+
+ 2008-11-20 Zoltan Varga <vargaz@gmail.com>
+
+ * arm/Makefile.am (libmonoarch_arm_la_SOURCES): Don't build tramp.c, it is only
+ used by the interpreter.
+
+ svn path=/trunk/mono/; revision=119444
+
+commit 3225dc9308230de9fbbca884c05e6b150a8e0333
+Author: Rodrigo Kumpera <kumpera@gmail.com>
+Date: Thu Nov 20 14:12:04 2008 +0000
+
+ 2008-11-20 Rodrigo Kumpera <rkumpera@novell.com>
+
+ * x86/x86-codegen.h: Add PEXTR B/W/D.
+
+ svn path=/trunk/mono/; revision=119441
+
+commit 5c317c4676f911a0620b54e6668cf66a5c0dda31
+Author: Rodrigo Kumpera <kumpera@gmail.com>
+Date: Tue Nov 18 21:56:58 2008 +0000
+
+ 2008-11-18 Rodrigo Kumpera <rkumpera@novell.com>
+
+ * x86/x86-codegen.h: Add PINSR B/W/D.
+
+ svn path=/trunk/mono/; revision=119229
+
+commit b31b375fc1354cc835d183e7e251e602eeb038c5
+Author: Rodrigo Kumpera <kumpera@gmail.com>
+Date: Tue Nov 18 21:56:49 2008 +0000
+
+ 2008-11-17 Rodrigo Kumpera <rkumpera@novell.com>
+
+ * x86/x86-codegen.h: Fix comment about the version of PCMPGTQ.
+
+ * x86/x86-codegen.h: Add movsd constant and x86_sse_alu_sd_membase_reg
+ macro.
+
+ svn path=/trunk/mono/; revision=119227
+
+commit dbebfad82832bf895561902dd527d2e4c158c2c9
+Author: Mark Probst <mark.probst@gmail.com>
+Date: Tue Nov 18 15:32:41 2008 +0000
+
+ 2008-11-18 Mark Probst <mark.probst@gmail.com>
+
+ * ppc/ppc-codegen.h: Macro for nop added.
+
+ 2008-11-18 Mark Probst <mark.probst@gmail.com>
+
+ * mini-ppc64.c, mini-ppc64.h, tramp-ppc64.c, cpu-ppc64.md: Changes
+ for PPC64. An empty program runs now.
+
+ svn path=/trunk/mono/; revision=119162
+
+commit 406790f1df77c80b5b28bcac561e7b6c6cd1a3a6
+Author: Mark Probst <mark.probst@gmail.com>
+Date: Tue Nov 18 10:25:11 2008 +0000
+
+ 2008-11-18 Mark Probst <mark.probst@gmail.com>
+
+ * ppc/ppc-codegen.h: PPC64 code generation macros.
+
+ Based on code submitted by andreas.faerber@web.de at
+ https://bugzilla.novell.com/show_bug.cgi?id=324134 under the
+ X11/MIT license.
+
+ svn path=/trunk/mono/; revision=119141
+
+commit 484dbedc8136e413a77ee11938d40e713cfefcfd
+Author: Mark Probst <mark.probst@gmail.com>
+Date: Tue Nov 18 10:17:36 2008 +0000
+
+ 2008-11-18 Mark Probst <mark.probst@gmail.com>
+
+ * ppc/ppc-codegen.h: A few fixes and additions.
+
+ Based on code submitted by andreas.faerber@web.de at
+ https://bugzilla.novell.com/show_bug.cgi?id=324134 under the
+ X11/MIT license.
+
+ svn path=/trunk/mono/; revision=119140
+
+commit 74b70bd5f7bc3b40a919c6c8b06c22facae8df6b
+Author: Rodrigo Kumpera <kumpera@gmail.com>
+Date: Mon Nov 17 17:00:22 2008 +0000
+
+ 2008-11-17 Rodrigo Kumpera <rkumpera@novell.com>
+
+ * x86/x86-codegen.h: Add X86_SSE_MOVHPD_MEMBASE_REG constant
+ and x86_sse_alu_pd_membase_reg/x86_sse_alu_membase_reg macros.
+
+ svn path=/trunk/mono/; revision=119057
+
+commit 59483983e37bb55af19f4e98e3de2f1ad216989b
+Author: Andreas Färber <afaerber@mono-cvs.ximian.com>
+Date: Sat Nov 15 10:59:47 2008 +0000
+
+ 2008-11-15 Andreas Faerber <andreas.faerber@web.de>
+
+ * ppc/test.c: Add support for Mac OS X.
+
+ This commit is licensed under the MIT X11 license.
+
+ svn path=/trunk/mono/; revision=118924
+
+commit 6c930cb35aa08e10abba989d9cb8560b4636ba73
+Author: Rodrigo Kumpera <kumpera@gmail.com>
+Date: Thu Nov 13 22:51:27 2008 +0000
+
+ 2008-11-13 Rodrigo Kumpera <rkumpera@novell.com>
+
+ * x86/x86-codegen.h: Remove unused macro x86_pshufd_reg_reg.
+
+ svn path=/trunk/mono/; revision=118779
+
+commit bfe79f71f1352fbbfb696de3b0c093562b6fefb5
+Author: Rodrigo Kumpera <kumpera@gmail.com>
+Date: Tue Nov 4 20:17:31 2008 +0000
+
+ 2008-11-04 Rodrigo Kumpera <rkumpera@novell.com>
+
+ * x86/x86-codegen.h: Add store nta.
+
+ svn path=/trunk/mono/; revision=117921
+
+commit 42f47d048391da1619aa26b70e54980c4c33e3f2
+Author: Rodrigo Kumpera <kumpera@gmail.com>
+Date: Mon Nov 3 14:41:44 2008 +0000
+
+ 2008-11-03 Rodrigo Kumpera <rkumpera@novell.com>
+
+ * x86/x86-codegen.h: Add prefetch instruction
+ and x86_sse_alu_reg_membase macro.
+
+ svn path=/trunk/mono/; revision=117753
+
+commit eaf2804839ffb61912a8eeef7c3a58463aafcdd6
+Author: Rodrigo Kumpera <kumpera@gmail.com>
+Date: Tue Oct 28 19:24:34 2008 +0000
+
+ 2008-10-28 Rodrigo Kumpera <rkumpera@novell.com>
+
+ * x86/x86-codegen.h: Add long version of the packed integer
+ ops.
+
+ svn path=/trunk/mono/; revision=117292
+
+commit 3fffcb4ac5879f2655ee3b4b3bee093a9eaa5016
+Author: Rodrigo Kumpera <kumpera@gmail.com>
+Date: Tue Oct 28 00:05:56 2008 +0000
+
+ 2008-10-27 Rodrigo Kumpera <rkumpera@novell.com>
+
+ * x86/x86-codegen.h: Add movddup.
+
+ svn path=/trunk/mono/; revision=117220
+
+commit bf9bec59fad96b9a7cb38921c26bb1c176fe40ce
+Author: Rodrigo Kumpera <kumpera@gmail.com>
+Date: Fri Oct 24 21:58:17 2008 +0000
+
+ 2008-10-24 Rodrigo Kumpera <rkumpera@novell.com>
+
+ * x86/x86-codegen.h: Add signed pack with saturation.
+
+ svn path=/trunk/mono/; revision=116995
+
+commit 2ffed07a8205616ea4a1605338f08c8ad6c77432
+Author: Rodrigo Kumpera <kumpera@gmail.com>
+Date: Fri Oct 24 13:36:53 2008 +0000
+
+ 2008-10-24 Rodrigo Kumpera <rkumpera@novell.com>
+
+ * x86/x86-codegen.h: Add signed packed mul high.
+
+ svn path=/trunk/mono/; revision=116936
+
+commit 2b6070d8bbd583f6bb90e02f3961252ef0854da8
+Author: Gonzalo Paniagua Javier <gonzalo.mono@gmail.com>
+Date: Fri Oct 24 01:02:49 2008 +0000
+
+ remove temporary/generated files
+
+ svn path=/trunk/mono/; revision=116902
+
+commit 7a2889c2ce0cfbc193324b64764a02e42f5daee8
+Author: Rodrigo Kumpera <kumpera@gmail.com>
+Date: Fri Oct 24 00:35:54 2008 +0000
+
+ 2008-10-23 Rodrigo Kumpera <rkumpera@novell.com>
+
+ * x86/x86-codegen.h: Add signed packed max, min, add/sub with saturation
+ and compare greater.
+
+ svn path=/trunk/mono/; revision=116896
+
+commit 600a42f70b41a94712aac746e44f2bba885dfc1f
+Author: Rodrigo Kumpera <kumpera@gmail.com>
+Date: Mon Oct 20 19:36:04 2008 +0000
+
+ 2008-10-20 Rodrigo Kumpera <rkumpera@novell.com>
+
+ * x86/x86-codegen.h: Add multiply and store high.
+
+ svn path=/trunk/mono/; revision=116545
+
+commit 454b5617264c1bb64ff7296669db98a14cc58118
+Author: Rodrigo Kumpera <kumpera@gmail.com>
+Date: Fri Oct 17 17:41:14 2008 +0000
+
+ 2008-10-17 Rodrigo Kumpera <rkumpera@novell.com>
+
+ * x86/x86-codegen.h: Add packed int shuffle.
+
+ svn path=/trunk/mono/; revision=116265
+
+commit 8336fe34234402529da0e46af634948d678ee649
+Author: Rodrigo Kumpera <kumpera@gmail.com>
+Date: Thu Oct 16 23:22:27 2008 +0000
+
+ 2008-10-16 Rodrigo Kumpera <rkumpera@novell.com>
+
+ * x86/x86-codegen.h: Add packed int compare equals and
+ psabw.
+
+ svn path=/trunk/mono/; revision=116117
+
+commit 0a6e6df8d766d7ad1b21d6c234826293d1317979
+Author: Rodrigo Kumpera <kumpera@gmail.com>
+Date: Wed Oct 15 20:52:54 2008 +0000
+
+ 2008-10-15 Rodrigo Kumpera <rkumpera@novell.com>
+
+ * x86/x86-codegen.h: Add packed int max/min/avg/shuffle and extract mask.
+
+ svn path=/trunk/mono/; revision=115919
+
+commit ec2240eaee83b7c5ff444e0708a114458394d55b
+Author: Rodrigo Kumpera <kumpera@gmail.com>
+Date: Tue Oct 14 15:02:05 2008 +0000
+
+ 2008-10-14 Rodrigo Kumpera <rkumpera@novell.com>
+
+ * x86/x86-codegen.h: Add movsldup and movshdup.
+
+ svn path=/trunk/mono/; revision=115785
+
+commit 7ed9633867d31f5dd5fd971611f952574c005a87
+Author: Rodrigo Kumpera <kumpera@gmail.com>
+Date: Mon Oct 13 22:13:15 2008 +0000
+
+ 2008-10-13 Rodrigo Kumpera <rkumpera@novell.com>
+
+ * x86/x86-codegen.h: Add remaining FP sse1 ops.
+ Add sse ps encoding with imm operand.
+ Add remaining sse1 ops.
+
+ svn path=/trunk/mono/; revision=115699
+
+commit 18f1e82ca6ebaf0929f654a56ab9ddfadfacacb5
+Author: Rodrigo Kumpera <kumpera@gmail.com>
+Date: Mon Oct 13 01:13:10 2008 +0000
+
+ 2008-10-12 Rodrigo Kumpera <rkumpera@novell.com>
+
+ * x86/x86-codegen.h: Add macro for sse41 ops.
+ Add defined for pack ops, dword shifts/mul/pack.
+
+ 2008-10-12 Rodrigo Kumpera <rkumpera@novell.com>
+
+ * basic-simd.cs: Remove PackWithUnsignedSaturation tests as it turns out
+ that the packuswb/packusdw don't work with unsigned numbers for what
+ would be negative numbers in signed format.
+
+ * cpu-x86.md: Add doubleword forms of many ops and packing ones.
+ Fix the len of fconv_to_r8_x and xconv_r8_to_i4.
+
+ * mini-ops.h: Add doubleword forms of many ops and packing ones.
+
+ * mini-x86.c: Emit doubleword forms of many ops and packing ones.
+
+ * simd-intrinsics.c (SimdIntrinsc): Rename the flags field to simd_version.
+
+ * simd-intrinsics.c (vector4f_intrinsics): Use simd_version field for sse3 ops.
+
+ * simd-intrinsics.c (vector4u_intrinsics): Rename to vector4ui_intrinsics and
+ add more ops.
+
+ * simd-intrinsics.c (simd_version_name): New function, returns the name of the
+ version as the enum in mini.h.
+
+ * simd-intrinsics.c (emit_intrinsics): Instead of having a special emit mode
+ for sse3 ops, check the simd_version field if present. This way the code
+ works with all versions of sse.
+
+ svn path=/trunk/mono/; revision=115610
+
+commit 494ea4f86907f393c8f0ba660edb100a107a8c80
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Sat Oct 11 05:26:06 2008 +0000
+
+ 2008-10-11 Zoltan Varga <vargaz@gmail.com>
+
+ * amd64/amd64-codegen.h (amd64_jump_code_size): Fix the 64 bit support.
+
+ svn path=/trunk/mono/; revision=115509
+
+commit ba0739c0dc1dd6713f6127160dcee501b105c300
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Fri Oct 10 21:55:37 2008 +0000
+
+ 2008-10-10 Zoltan Varga <vargaz@gmail.com>
+
+ * amd64/amd64-codegen.h (amd64_jump_code_size): Handle non 32-bit targets.
+
+ svn path=/trunk/mono/; revision=115494
+
+commit 5de452f7ff84e26bd22b86205a1cdb9fc207fe75
+Author: Rodrigo Kumpera <kumpera@gmail.com>
+Date: Thu Oct 9 18:28:16 2008 +0000
+
+ 2008-10-09 Rodrigo Kumpera <rkumpera@novell.com>
+
+ * x86/x86-codegen.h: Add macros for sse shift, pack, unpack,
+ saturated math and packed byte/word/dword math.
+
+ svn path=/trunk/mono/; revision=115367
+
+commit 922c5a03dc6cd66147b1c6bfeb8c1045176618da
+Author: Rodrigo Kumpera <kumpera@gmail.com>
+Date: Fri Oct 3 14:28:09 2008 +0000
+
+ 2008-10-03 Rodrigo Kumpera <rkumpera@novell.com>
+
+ * x86/x86-codegen.h: Add macros and enum for SSE instructions.
+
+ svn path=/trunk/mono/; revision=114751
+
+commit f2d756dab8d08c009df41d94eb21fdf427a8e01a
+Author: Mark Probst <mark.probst@gmail.com>
+Date: Sat Sep 27 13:02:48 2008 +0000
+
+ 2008-09-27 Mark Probst <mark.probst@gmail.com>
+
+ * ppc/ppc-codegen.h: A few typecasts to fix compiler warnings.
+
+ 2008-09-27 Mark Probst <mark.probst@gmail.com>
+
+ * mini-ppc.c: Compiler warning fixes.
+
+ svn path=/trunk/mono/; revision=114279
+
+commit 386d8b482a7e399e4e8d130dd0d2d2ab405068ae
+Author: Mark Probst <mark.probst@gmail.com>
+Date: Sun Sep 7 10:25:11 2008 +0000
+
+ 2008-09-07 Mark Probst <mark.probst@gmail.com>
+
+ * marshal.c (mono_type_native_stack_size): Treat
+ MONO_TYPE_TYPEDBYREF like MONO_TYPE_VALUETYPE.
+
+ 2008-09-07 Mark Probst <mark.probst@gmail.com>
+
+ * method-to-ir.c (mono_method_to_ir2): Disable tail calls for PPC
+ until they're implemented properly.
+
+ * exceptions-ppc.c: Use arch-independent exception-handling code
+ instead of custom one.
+
+ * exceptions-ppc.c, mini-ppc.c, mini-ppc.h: Bug fixes and changes
+ for Linear IR.
+
+ * tramp-ppc.c, mini-ppc.c: Fixed warnings.
+
+ * decompose.c, aot-runtime.c, aot-compiler.c: PPC code also
+ applies when __powerpc__ is defined.
+
+ 2008-09-07 Mark Probst <mark.probst@gmail.com>
+
+ * libtest.c: Darwin structure alignment also applies to PPC.
+
+ 2008-09-07 Mark Probst <mark.probst@gmail.com>
+
+ * ppc/ppc-codegen.h (ppc_load): Inserted cast to fix some
+ warnings.
+
+ svn path=/trunk/mono/; revision=112455
+
+commit 5c8178c1e6cf4d2370c865c6bc66995ca1174eb9
+Author: Mark Probst <mark.probst@gmail.com>
+Date: Mon Jun 16 09:37:01 2008 +0000
+
+ 2008-06-16 Mark Probst <mark.probst@gmail.com>
+
+ * amd64/amd64-codegen.h: Removed extraneous parenthesis in a macro
+ nobody uses.
+
+ svn path=/trunk/mono/; revision=105886
+
+commit ecbcbb317678440e62a13e16820f95f6ea2dff3d
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Fri Jun 6 02:08:56 2008 +0000
+
+ 2008-06-06 Zoltan Varga <vargaz@gmail.com>
+
+ * amd64/amd64-codegen.h (amd64_padding_size): Rewrite this to use the
+ instructions recommended by the amd64 manual.
+
+ svn path=/trunk/mono/; revision=105134
+
+commit 0ded1416da01e39a6c4a33fc9798123d4021fe4d
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Sat Apr 19 14:18:56 2008 +0000
+
+ 2008-04-19 Zoltan Varga <vargaz@gmail.com>
+
+ * amd64/amd64-codegen.h (amd64_is_imm32): Use gint64 instead of glong because of
+ win64.
+
+ svn path=/trunk/mono/; revision=101210
+
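+ The win64 issue is the LLP64 model: long stays 32 bits there, so a
+ range check written in terms of long can never fail. A sketch of
+ the corrected shape (name illustrative):
+
+     #include <stdint.h>
+
+     #define my_is_imm32(v) ((int64_t) (v) >= -((int64_t) 1 << 31) && \
+                             (int64_t) (v) <=  ((int64_t) 1 << 31) - 1)
+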
+commit cb1954322f73b8d1b0a6836c5242b05538ed72dd
+Author: Jb Evain <jbevain@gmail.com>
+Date: Sun Apr 13 11:44:22 2008 +0000
+
+ last merge 100420:100549
+
+ svn path=/branches/jb/ml2/mono/; revision=100550
+
+commit a977d5e7585e338491944fc87b5e018891eedd93
+Author: Geoff Norton <grompf@sublimeintervention.com>
+Date: Wed Mar 12 17:08:32 2008 +0000
+
+ In .:
+ 2008-03-13 Geoff Norton <gnorton@novell.com>
+
+ * arch/arm/tramp.c: Don't compile this on PLATFORM_MACOSX
+
+
+ svn path=/trunk/mono/; revision=98063
+
+commit 8c6ca9f3fda169feccab289ecd181e06bcc8e133
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Mon Feb 18 18:25:24 2008 +0000
+
+ 2008-02-18 Zoltan Varga <vargaz@gmail.com>
+
+ * amd64/amd64-codegen.h (amd64_movsxd_reg_mem): New codegen macro.
+
+ svn path=/trunk/mono/; revision=96092
+
+commit 7a7cef000b9d59672b47c0fcdf75bd1fc00b8c78
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Thu Feb 14 14:21:56 2008 +0000
+
+ 2008-02-14 Zoltan Varga <vargaz@gmail.com>
+
+ * amd64/amd64-codegen.h (amd64_alu_membase8_imm_size): New codegen macro.
+
+ svn path=/trunk/mono/; revision=95633
+
+commit 9cbc23b5ee9e4f2dca88f8418d11be97079c25a1
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Fri Feb 8 14:28:06 2008 +0000
+
+ 2008-02-08 Zoltan Varga <vargaz@gmail.com>
+
+ * arm/arm-codegen.h: Fix the ordering of arguments for some load/store opcodes
+ so they are consistent.
+
+ svn path=/trunk/mono/; revision=95254
+
+commit b951542a9ead8a408c6560a0ffad28a5ade9670d
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Thu Jan 24 20:12:46 2008 +0000
+
+ 2008-01-24 Zoltan Varga <vargaz@gmail.com>
+
+ * Makefile.am (SUBDIRS): Or if INTERP_SUPPORTED is true.
+
+ svn path=/trunk/mono/; revision=93834
+
+commit 95aa5dc93dbfbcf10125032ecde0e5eabc969a98
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Thu Jan 24 20:10:14 2008 +0000
+
+ 2008-01-24 Zoltan Varga <vargaz@gmail.com>
+
+ * Makefile.am (SUBDIRS): Only set this on arm.
+
+ svn path=/trunk/mono/; revision=93833
+
+commit 11c84542edf07ed41b831c12058f9a0bdd83df93
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Tue Nov 20 17:45:36 2007 +0000
+
+ 2007-11-20 Zoltan Varga <vargaz@gmail.com>
+
+ * amd64/amd64-codegen.h (amd64_alu_reg_imm_size): Prefer the smaller
+ instruction encoding.
+
+ svn path=/trunk/mono/; revision=90005
+
+commit b15fabef0c7798e4850432910d97e0249cd691fc
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Sat Nov 10 15:22:00 2007 +0000
+
+ 2007-11-03 David S. Miller <davem@davemloft.net>
+
+ * sparc/sparc-codegen.h (sparc_set32, sparc_set): A plain sethi
+ can be used if the constant value only has the top 22 bits set.
+
+ svn path=/trunk/mono/; revision=89409
+
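+ Sketch of the condition (predicate name illustrative): sethi writes
+ bits 31..10 of a register and clears the low 10, so a constant whose
+ low 10 bits are zero needs no following "or".
+
+     #include <stdint.h>
+
+     static int sethi_alone_suffices (uint32_t val)
+     {
+             return (val & 0x3FFu) == 0; /* low 10 bits clear */
+     }
+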
+commit e22c1134d1553f6da21c1ef50ab4afb009d7c215
+Author: Geoff Norton <grompf@sublimeintervention.com>
+Date: Mon Nov 5 22:28:08 2007 +0000
+
+ 2007-11-01 Geoff Norton <gnorton@novell.com>
+
+ * x86/Makefile.am: Only compile tramp.c if INTERP_SUPPORTED is true
+ Fixes the build on Leopard.
+
+
+ svn path=/trunk/mono/; revision=88931
+
+commit ad3b3601f5c113df825c3d2e09fb03b5aa4d1208
+Author: Geoff Norton <grompf@sublimeintervention.com>
+Date: Thu Nov 1 19:03:16 2007 +0000
+
+ 2007-11-01 Geoff Norton <gnorton@novell.com>
+
+ * ppc/Makefile.am: Only compile tramp.c if INTERP_SUPPORTED is true
+ Fixes the build on Leopard.
+
+ svn path=/trunk/mono/; revision=88673
+
+commit 8991f4a9503167171a0ad5e745d71ec4bd8b846c
+Author: Jonathan Chambers <joncham@gmail.com>
+Date: Fri Oct 26 14:41:54 2007 +0000
+
+ 2007-10-26 Jonathan Chambers <joncham@gmail.com>
+
+ * mini-amd64.c: Begin Win64 port. Use AMD64_ARG_REG#
+ defines to access param registers. Replace long usage with
+ gsize as sizeof(long) != sizeof(void*) on Win64.
+
+ * mini-amd64.h: Add %rdi and %rsi to MonoLMF structure
+ on Win64. Fix intrinsic, use _AddressOfReturnAddress
+ instead of non-existent _GetAddressOfReturnAddress.
+
+ * tramp-amd64.c: Use AMD64_ARG_REG# defines to access
+ param registers. Save/restore %rdi and %rsi in MonoLMF.
+
+ * exceptions-amd64.c: Use AMD64_ARG_REG# defines to access
+ param registers. Modify (throw_exception) signature to take
+ %rdi and %rsi on Win64.
+
+ Code is contributed under MIT/X11 license.
+
+ 2007-10-26 Jonathan Chambers <joncham@gmail.com>
+
+ * amd64/amd64-codegen.h: Begin Win64 port. Use AMD64_ARG_REG#
+ defines to access param registers. Replace long usage with
+ gsize as sizeof(long) != sizeof(void*) on Win64.
+
+ Code is contributed under MIT/X11 license.
+
+
+ svn path=/trunk/mono/; revision=88258
+
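+ The AMD64_ARG_REG# indirection matters because the two amd64 ABIs
+ disagree on parameter registers; a sketch of the idea (these macro
+ names are illustrative):
+
+     #ifdef _WIN64  /* Microsoft x64: rcx, rdx, r8, r9 */
+     #define MY_ARG_REG1 AMD64_RCX
+     #define MY_ARG_REG2 AMD64_RDX
+     #else          /* System V: rdi, rsi, rdx, rcx, r8, r9 */
+     #define MY_ARG_REG1 AMD64_RDI
+     #define MY_ARG_REG2 AMD64_RSI
+     #endif
+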
+commit 118f4540a2da9cdb72debfb786a9930e93f2a10b
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Tue Oct 9 00:12:58 2007 +0000
+
+ 2007-10-09 Zoltan Varga <vargaz@gmail.com>
+
+ * amd64/amd64-codegen.h (amd64_jump_membase_size): Remove an unnecessary
+ rex prefix which trips up valgrind.
+
+ svn path=/trunk/mono/; revision=87140
+
+commit e43f3ebed2b5b54c47b5f8ce458788dce0ef97dc
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Sat Jul 14 14:04:54 2007 +0000
+
+ 2007-07-14 Zoltan Varga <vargaz@gmail.com>
+
+ * amd64/amd64-codegen.h: Remove some unused rex prefixes.
+
+ svn path=/trunk/mono/; revision=81979
+
+commit 25f0e1d2bd61097c008fa88e4a114884bb6fe0c9
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Wed Jul 4 13:17:45 2007 +0000
+
+ Wed Jul 4 15:29:07 CEST 2007 Paolo Molaro <lupus@ximian.com>
+
+ * x86/x86-codegen.h: added minimal sse instructions currently
+ needed by the JIT.
+
+
+ svn path=/trunk/mono/; revision=81331
+
+commit e971b6ec5cf03043dc227759fced05d5786964d4
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Wed Jun 13 17:41:53 2007 +0000
+
+ 2007-06-13 Randolph Chung <tausq@debian.org>
+
+ * hppa/hppa-codegen.h: Update with more instructions.
+ * hppa/tramp.c: Disable for linux since we don't support the
+ interpreter.
+
+
+ svn path=/trunk/mono/; revision=79463
+
+commit 26169bb71cd30b373975373952fb11d7a26b0cca
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Sun May 20 19:41:51 2007 +0000
+
+ 2007-05-20 Zoltan Varga <vargaz@gmail.com>
+
+ * amd64/amd64-codegen.h (amd64_call_reg): Remove a got prefix which isn't needed.
+
+ svn path=/trunk/mono/; revision=77730
+
+commit a024b2405701bbee2003e46a0f9b0e2c0486033c
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Mon Apr 23 11:31:33 2007 +0000
+
+ 2007-04-23 Zoltan Varga <vargaz@gmail.com>
+
+ * alpha/alpha-codegen.h: More alpha port work from
+ Sergey Tikhonov <tsv@solvo.ru>.
+
+ svn path=/trunk/mono/; revision=76103
+
+commit 5ca5ea86f1ff85953c28e0ba3b657268cd2cdfba
+Author: Neale Ferguson <neale@mono-cvs.ximian.com>
+Date: Sun Apr 15 09:11:00 2007 +0000
+
+ * tramp.c: Add MONO_TYPE_PTR case.
+ * mini-s390.c: Correct checking for enum type in return value processing.
+
+ svn path=/trunk/mono/; revision=75718
+
+commit 9159abc7ec906d64a15eee8e02b9e5b3f2cce87d
+Author: Neale Ferguson <neale@mono-cvs.ximian.com>
+Date: Thu Apr 12 20:45:34 2007 +0000
+
+ * tramp.c: Add MONO_TYPE_PTR case.
+
+
+ svn path=/trunk/mono/; revision=75663
+
+commit b7fd657ee94257eeec946fa9eb11b3f60e7e33e6
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Mon Mar 12 16:07:56 2007 +0000
+
+ Mon Mar 12 17:07:32 CET 2007 Paolo Molaro <lupus@ximian.com>
+
+ * amd64/amd64-codegen.h: removed some useless size rex prefixes.
+
+
+ svn path=/trunk/mono/; revision=74128
+
+commit 0ba3e4bdd057c7a0d25767f7647a00f07683b44c
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Wed Jan 24 20:01:27 2007 +0000
+
+ Wed Jan 24 21:00:40 CET 2007 Paolo Molaro <lupus@ximian.com>
+
+ * arm/arm-codegen.h: fixed encoding of short/byte load/store
+ instructions with negative immediate offsets.
+
+
+ svn path=/trunk/mono/; revision=71622
+
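+ The underlying encoding detail (sketch, helper name illustrative):
+ ARM immediate offsets are not two's-complement; the instruction
+ carries a magnitude plus the U bit (bit 23), so a negative offset
+ must be emitted as U = 0 with the absolute value.
+
+     #include <stdint.h>
+
+     static uint32_t split_offset (int32_t off, uint32_t *u_bit)
+     {
+             *u_bit = off >= 0;  /* U = 1 adds, U = 0 subtracts */
+             return (uint32_t) (off >= 0 ? off : -off);
+     }
+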
+commit 0251f000fba5c8f99bec6c33beae0c2aabe66451
+Author: Neale Ferguson <neale@mono-cvs.ximian.com>
+Date: Tue Jan 23 17:11:29 2007 +0000
+
+ * s390x-codegen.h: Add packed attribute to several instruction structures.
+
+ svn path=/trunk/mono/; revision=71523
+
+commit 8e25ae408b9d1836130807d3f465023347051332
+Author: Miguel de Icaza <miguel@gnome.org>
+Date: Fri Dec 22 22:51:15 2006 +0000
+
+ Patch from Sergey Tikhonov <tsv@solvo.ru>
+
+ Mono on Alpha updates:
+
+ - Code cleanup
+ - Some hacks to support debugger
+ - updates for "linears" optimization
+
+
+ svn path=/trunk/mono/; revision=69976
+
+commit edd2746e20c982e094abfd547afad74d8e7d2302
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Mon Nov 20 16:37:26 2006 +0000
+
+ Mon Nov 20 17:36:45 CET 2006 Paolo Molaro <lupus@ximian.com>
+
+ * arm/arm-codegen.h: added support for thumb interworking instructions.
+
+
+ svn path=/trunk/mono/; revision=68201
+
+commit b63503e7c4b5ebb8baafb5b58ec69395146db022
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Wed Nov 15 16:00:09 2006 +0000
+
+ Wed Nov 15 16:56:53 CET 2006 Paolo Molaro <lupus@ximian.com>
+
+ * mips/*: fixes by Mark E Mason <mark.e.mason@broadcom.com>.
+
+
+ svn path=/trunk/mono/; revision=67929
+
+commit 6f8d67005785ba86e81ac930325767d0b270a070
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Fri Nov 10 18:42:10 2006 +0000
+
+ Typo fixes.
+
+ svn path=/trunk/mono/; revision=67683
+
+commit f99322f3ea7b7be85ac63c87c664aafb7f5e17bf
+Author: Miguel de Icaza <miguel@gnome.org>
+Date: Wed Oct 11 21:34:24 2006 +0000
+
+ 2006-10-11 Sergey Tikhonov <tsv@solvo.ru>
+
+ * atomic.h: Fix atomic decrement.
+
+ * mini/cpu-alpha.md: Use native long shift insts
+
+ * mono/mono/mini/tramp-alpha.c: Implemented
+ mono_arch_patch_delegate_trampoline method
+
+ * Started work on using global registers
+
+ * Use byte/word memory load/store insts if cpu supports it
+
+ * Code clean up
+
+
+ svn path=/trunk/mono/; revision=66573
+
+commit 538fd0794b9ef24f7c765891ed682fc947cf8e85
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Tue Sep 12 13:02:59 2006 +0000
+
+ 2006-09-12 Zoltan Varga <vargaz@gmail.com>
+
+ * alpha/alpha-codegen.h: More alpha updates from Sergey Tikhonov <tsv@solvo.ru>.
+
+ svn path=/trunk/mono/; revision=65305
+
+commit 0689ca5f72fa8cb03fb1b565a31c4e2b22774a64
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Tue Sep 12 11:10:42 2006 +0000
+
+ Tue Sep 12 13:09:56 CEST 2006 Paolo Molaro <lupus@ximian.com>
+
+ * arm/*: VFP floating point format code generation support.
+
+
+ svn path=/trunk/mono/; revision=65295
+
+commit deacad246a936216f09a81b9881c6780de8dd406
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Tue Sep 12 10:05:29 2006 +0000
+
+ 2006-09-12 Zoltan Varga <vargaz@gmail.com>
+
+ * ia64/ia64-codegen.h: Add xmpy_l/u pseudo ops.
+
+ svn path=/trunk/mono/; revision=65289
+
+commit 207e90216277d1d1ee0e6cd37f183440c8c39a26
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Wed Jul 19 12:10:43 2006 +0000
+
+ 2006-07-19 Zoltan Varga <vargaz@gmail.com>
+
+ * amd64/amd64-codegen.h: Fix amd64_mov_mem_reg.
+
+ svn path=/trunk/mono/; revision=62746
+
+commit 8f58fa13418008cb86a8ba450a894b23efc4574e
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Wed Jul 19 12:09:09 2006 +0000
+
+ 2006-07-19 Zoltan Varga <vargaz@gmail.com>
+
+ * alpha/alpha-codegen.h alpha/test.c alpha/tramp.c: Applied patch from
+ Sergey Tikhonov <tsv@solvo.ru>. Updates to alpha support.
+
+ svn path=/trunk/mono/; revision=62745
+
+commit ef8021400f045f835fcf70baf5ba5880fe6eca93
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Thu Jun 15 15:00:59 2006 +0000
+
+ Thu Jun 15 16:59:36 CEST 2006 Paolo Molaro <lupus@ximian.com>
+
+ * ppc/ppc-codegen.h: reduce noisy build warnings by
+ casting to the more commonly used unsigned char type
+ (from johannes@sipsolutions.net (Johannes Berg)).
+
+
+ svn path=/trunk/mono/; revision=61757
+
+commit de54a3e44b1214298b39386b49e1ca992176e2e4
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Sun May 14 18:51:25 2006 +0000
+
+ 2006-05-14 Zoltan Varga <vargaz@gmail.com>
+
+ * ia64/ia64-codegen.h (ia64_fetchadd8_acq_hint_pred): Fix encoding of this
+ opcode.
+
+ svn path=/trunk/mono/; revision=60695
+
+commit 3b274ddc5c946640a4c0d6a7b2dee13cd2f5096d
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Fri Apr 21 14:51:24 2006 +0000
+
+ 2006-04-21 Zoltan Varga <vargaz@gmail.com>
+
+ * Makefile.am (SUBDIRS): Revert the last change as arm depends on the old
+ behaviour.
+
+ svn path=/trunk/mono/; revision=59758
+
+commit e830aadb2febf62051b8fc162884a909087cfe4e
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Wed Apr 12 19:02:09 2006 +0000
+
+ 2006-04-12 Zoltan Varga <vargaz@gmail.com>
+
+ * sparc/sparc-codegen.h (sparc_inst_i): New disassembly macro.
+
+ svn path=/trunk/mono/; revision=59415
+
+commit a65cd014e420a38b47e00f5c6f9ce590fc00987b
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Tue Apr 4 13:18:49 2006 +0000
+
+ 2006-04-04 Zoltan Varga <vargaz@gmail.com>
+
+ * Makefile.am (SUBDIRS): Avoid compiling subdirs needed by the
+ interpreter.
+
+ svn path=/trunk/mono/; revision=59009
+
+commit 0d566f3cb37ddf731fba6cfce9741e2224a13d77
+Author: Neale Ferguson <neale@mono-cvs.ximian.com>
+Date: Mon Mar 13 22:03:39 2006 +0000
+
+ * s390x-codegen.h: Fix immediate checks.
+
+ svn path=/trunk/mono/; revision=57914
+
+commit 15bc8b574c91bfaa40cd1d83374d0179148b5894
+Author: Neale Ferguson <neale@mono-cvs.ximian.com>
+Date: Fri Jan 6 18:52:21 2006 +0000
+
+ * s390x-codegen.h: Add lpdbr instruction (OP_ABS).
+
+ * mini-s390x.c, inssel-s390x.brg, cpu-s390x.md: Fix ATOMIC_I8
+ operations. Provide initial support for OP_ABS.
+
+ svn path=/trunk/mono/; revision=55158
+
+commit 1092c74e7a468b7761df92c2dc0dd2f2b49f21e6
+Author: Neale Ferguson <neale@mono-cvs.ximian.com>
+Date: Tue Jan 3 19:40:34 2006 +0000
+
+ * mono/io-layer/ChangeLog, mono/io-layer/atomic.h, mono/mini/mini-s390x.c,
+ mono/mini/mini-s390x.h, mono/mini/exceptions-s390x.c,
+ mono/mini/ChangeLog, mono/mini/s390-abi.cs, mono/mini/tramp-s390x.c,
+ mono/mini/inssel-s390x.brg, mono/mini/cpu-s390x.md, mono/mini/mini-codegen.c
+ mono/mini/basic-long.cs, mono/mini/Makefile.am, mono/arch/s390x/ChangeLog
+ mono/arch/s390x/s390x-codegen.h: 64-bit s390 support
+
+ svn path=/trunk/mono/; revision=55020
+
+commit 417b7fbe8f810e8fd62b2cb805164a3b80a536d6
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Thu Dec 22 20:18:18 2005 +0000
+
+ 2005-12-22 Zoltan Varga <vargaz@gmail.com>
+
+ * sparc/sparc-codegen.h (sparc_membar): Add membar instruction.
+
+ svn path=/trunk/mono/; revision=54750
+
+commit 259b4749eaf68bfd6818ab38df91e37239c5dd45
+Author: Neale Ferguson <neale@mono-cvs.ximian.com>
+Date: Tue Dec 13 19:12:20 2005 +0000
+
+ Continuing to bring s390 up to current levels
+
+ svn path=/trunk/mono/; revision=54312
+
+commit f5fc186c01c764705e303b3783bf06e507e54640
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Tue Dec 13 13:57:51 2005 +0000
+
+ Avoid lvalue pointer casts.
+
+ svn path=/trunk/mono/; revision=54279
+
+commit ab97bc8d9e311f447d9f4a78e5a28ef6ff9b82ad
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Sun Oct 30 18:06:59 2005 +0000
+
+ 2005-10-30 Zoltan Varga <vargaz@gmail.com>
+
+ * ia64/ia64-codegen.h (ia64_m17): Fix a warning.
+
+ svn path=/trunk/mono/; revision=52399
+
+commit bb6893fc1e1854a8c9f848dfbfbc2dd00bde8735
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Sun Oct 16 15:21:39 2005 +0000
+
+ 2005-10-16 Zoltan Varga <vargaz@freemail.hu>
+
+ * amd64/amd64-codegen.h (AMD64_CALLEE_SAVED_REGS): Add %rbp.
+
+ svn path=/trunk/mono/; revision=51764
+
+commit 0b2d13a625bfd03f8d24538ef48870daed540ee3
+Author: Miguel de Icaza <miguel@gnome.org>
+Date: Fri Oct 7 21:25:31 2005 +0000
+
+ Patch incorporated from SUSE, Neale reviewed it
+
+ svn path=/trunk/mono/; revision=51443
+
+commit 2bba48015b516fd326cd082eb85325aa5b7676bf
+Author: Miguel de Icaza <miguel@gnome.org>
+Date: Fri Oct 7 20:36:01 2005 +0000
+
+ Patch incorporated from SUSE, Neale reviewed it
+
+ svn path=/trunk/mono/; revision=51434
+
+commit 749c9989f64683d8363481304647924ec1d910af
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Tue Sep 27 13:25:16 2005 +0000
+
+ Another compilation fix.
+
+ svn path=/trunk/mono/; revision=50857
+
+commit 64dbeb6e048aa9654800624a74e9c58065cf01ea
+Author: Raja R Harinath <harinath@hurrynot.org>
+Date: Tue Sep 27 09:09:41 2005 +0000
+
+ * arm/dpiops.sh, arm/fpaops.h: Output to stdout.
+ * arm/Makefile.am (arm_dpimacros.h, arm_fpamacros.h): Update. Fix
+ for srcdir != builddir.
+
+ svn path=/trunk/mono/; revision=50833
+
+commit 7c363c19299d3f85ee7de0eec2a83108ea98eff2
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Mon Sep 26 08:58:47 2005 +0000
+
+ Compilation fix.
+
+ svn path=/trunk/mono/; revision=50748
+
+commit 541c387c65579ca75abe8cdb9d0725c1e6d90df1
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Sun Sep 11 16:55:41 2005 +0000
+
+ 2005-09-11 Zoltan Varga <vargaz@gmail.com>
+
+ * ia64/ia64-codegen.h (ia64_unw_pop_frames): New unwind macro.
+
+ svn path=/trunk/mono/; revision=49910
+
+commit efbd8e41cf3337d59812a7cca48df3caee116b07
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Sat Sep 10 20:50:37 2005 +0000
+
+ 2005-09-10 Zoltan Varga <vargaz@gmail.com>
+
+ * ia64/ia64-codegen.h: Remove 'manual' emitting of instructions.
+ Integrate emission of unwind directives into the assembly macros.
+
+ svn path=/trunk/mono/; revision=49875
+
+commit 8b07d9836f60fee4ff83a14ce110921be8ef8f2e
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Sat Sep 3 22:06:10 2005 +0000
+
+ 2005-09-04 Zoltan Varga <vargaz@gmail.com>
+
+ * ia64/ia64-codegen.h (ia64_no_stop): New macro.
+
+ svn path=/trunk/mono/; revision=49399
+
+commit 4e89407a4a8dc38125a804df930515a31603cdca
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Sat Aug 27 14:33:09 2005 +0000
+
+ 2005-08-27 Zoltan Varga <vargaz@gmail.com>
+
+ * ia64/ia64-codegen.h: Fix some bugs.
+
+ * ia64/codegen.c: Update to work with latest ia64-codegen.h
+
+ svn path=/trunk/mono/; revision=48969
+
+commit 9a52b3ea85b1899c6cc23263eec6879841b3fd08
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Fri Aug 26 13:34:24 2005 +0000
+
+ 2005-08-26 Zoltan Varga <vargaz@gmail.com>
+
+ * ia64/Makefile.am: Distribute ia64-codegen.h.
+
+ svn path=/trunk/mono/; revision=48891
+
+commit 16291812e22e9750bf101e297fc573ce35bab382
+Author: Wade Berrier <wade@mono-cvs.ximian.com>
+Date: Fri Aug 26 06:58:33 2005 +0000
+
+ Oops
+
+ svn path=/trunk/mono/; revision=48874
+
+commit d4b1ea47e0395555276e1a6c8ddfa3800692b6ea
+Author: Wade Berrier <wade@mono-cvs.ximian.com>
+Date: Fri Aug 26 06:48:41 2005 +0000
+
+ Include files for 'make dist'
+
+ svn path=/trunk/mono/; revision=48871
+
+commit cac0da0afb2a782de1db55a000a2125531e757fd
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Sat Aug 20 22:16:11 2005 +0000
+
+ 2005-08-21 Zoltan Varga <vargaz@freemail.hu>
+
+ * ia64/ia64-codegen.h: Improve ins scheduling and fix some bugs.
+
+ svn path=/trunk/mono/; revision=48614
+
+commit d151f0e0b203a78ca99cab91d9df89ffe7728880
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Wed Aug 17 20:28:30 2005 +0000
+
+ 2005-08-17 Zoltan Varga <vargaz@freemail.hu>
+
+ * ia64/ia64-codegen.h: Add dependency information for all instructions.
+
+ svn path=/trunk/mono/; revision=48476
+
+commit f1bce593b3504a82fc344d696eeedd91c39bcfee
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Thu Aug 4 18:51:34 2005 +0000
+
+ Uncommitted fixes.
+
+ svn path=/trunk/mono/; revision=48015
+
+commit 8348805e278d70da207455a0fe5cd470b00f3d8d
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Sat Jul 30 15:43:43 2005 +0000
+
+ 2005-07-30 Zoltan Varga <vargaz@freemail.hu>
+
+ * ia64/ia64-codegen.h: Ongoing IA64 work.
+
+ svn path=/trunk/mono/; revision=47855
+
+commit 0fb75c64cb1361cc81a4e47ca556a597b440d65a
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Wed Jul 20 16:55:20 2005 +0000
+
+ Wed Jul 20 18:01:54 BST 2005 Paolo Molaro <lupus@ximian.com>
+
+ * arm/*: more codegen macros.
+
+
+ svn path=/trunk/mono/; revision=47473
+
+commit 2205bab6932e69490e48b9e11957041e938020ee
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Mon Jul 18 20:33:37 2005 +0000
+
+ 2005-07-18 Zoltan Varga <vargaz@freemail.hu>
+
+ * ia64/ia64-codegen.h (ia64_is_adds_imm): Ongoing IA64 work.
+
+ svn path=/trunk/mono/; revision=47395
+
+commit 5a9a7537801ad68c0f8552e7e107994b793e93ac
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Wed Jun 22 22:00:43 2005 +0000
+
+ 2005-06-23 Zoltan Varga <vargaz@freemail.hu>
+
+ * ia64/ia64-codegen.h: Add some new pseudo ops.
+
+ svn path=/trunk/mono/; revision=46401
+
+commit f51b94e34b1a887304ace96af27d51b4ec98ab4b
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Sun Jun 19 20:18:07 2005 +0000
+
+ 2005-06-19 Zoltan Varga <vargaz@freemail.hu>
+
+ * ia64/ia64-codegen.h: Fix encoding of ia64_fclass.
+
+ svn path=/trunk/mono/; revision=46224
+
+commit 398224a9101808c8ca470b24366a506eeefec135
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Sun Jun 12 20:41:05 2005 +0000
+
+ 2005-06-12 Zoltan Varga <vargaz@freemail.hu>
+
+ * ia64/ia64-codegen.h: Ongoing IA64 work.
+
+ svn path=/trunk/mono/; revision=45834
+
+commit 5a9f032072053d76af233b9906614ee491d6295c
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Thu Jun 9 20:22:08 2005 +0000
+
+ 2005-06-09 Zoltan Varga <vargaz@freemail.hu>
+
+ * ia64/ia64-codegen.h: Ongoing IA64 work.
+
+ svn path=/trunk/mono/; revision=45719
+
+commit 5f3ca7841b8aedd35f0c23781f2ac96f31ed501e
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Mon May 30 14:09:48 2005 +0000
+
+ 2005-05-30 Zoltan Varga <vargaz@freemail.hu>
+
+ * ia64/codegen.c: Fix it after latest changes.
+
+ svn path=/trunk/mono/; revision=45192
+
+commit d6844049f8659741b3afe9fa66136738107d28ac
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Sun May 29 14:24:56 2005 +0000
+
+ 2005-05-29 Zoltan Varga <vargaz@freemail.hu>
+
+ * ia64/ia64-codegen.h: Ongoing IA64 work.
+
+ svn path=/trunk/mono/; revision=45159
+
+commit 4be6ea9e269927e9fbf06b0b73f53fef311f569f
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Sun May 29 11:16:27 2005 +0000
+
+ 2005-05-29 Zoltan Varga <vargaz@freemail.hu>
+
+ * ia64/ia64-codegen.h: Ongoing IA64 work.
+
+ svn path=/trunk/mono/; revision=45157
+
+commit 7b483f1f48c7abc9d0c17a1fb34b30ddaa7058bb
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Sat May 28 18:02:41 2005 +0000
+
+ 2005-05-28 Zoltan Varga <vargaz@freemail.hu>
+
+ * ia64/ia64-codegen.h: Ongoing IA64 work.
+
+ svn path=/trunk/mono/; revision=45147
+
+commit e360150e81b841b0644b5adc604f22f4b71e3987
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Sat May 28 17:08:04 2005 +0000
+
+ 2005-05-28 Zoltan Varga <vargaz@freemail.hu>
+
+ * ia64/ia64-codegen.h: Ongoing IA64 work.
+
+ svn path=/trunk/mono/; revision=45145
+
+commit a781c3a65727b60386604adc6023f3f5a53b3e3e
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Fri May 27 21:41:59 2005 +0000
+
+ 2005-05-28 Zoltan Varga <vargaz@freemail.hu>
+
+ * ia64/ia64-codegen.h: Ongoing IA64 work.
+
+ svn path=/trunk/mono/; revision=45127
+
+commit 20c2fc7ba73ffaf5506ab9bf487c3f519de5067f
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Thu May 26 17:16:50 2005 +0000
+
+ 2005-05-26 Zoltan Varga <vargaz@freemail.hu>
+
+ * ia64/ia64-codegen.h: Ongoing IA64 work.
+
+ svn path=/trunk/mono/; revision=45064
+
+commit f37723d307325b539fc515774d3988e0c7ff7a14
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Sun May 22 18:25:06 2005 +0000
+
+ 2005-05-22 Zoltan Varga <vargaz@freemail.hu>
+
+ * ia64/ia64-codegen.h: Ongoing IA64 work.
+
+ svn path=/trunk/mono/; revision=44892
+
+commit 1d1c3f56953c0cb26c2e695b468ea1da368aaef0
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Sun May 22 13:31:28 2005 +0000
+
+ 2005-05-22 Zoltan Varga <vargaz@freemail.hu>
+
+ * ia64/ia64-codegen.h: Ongoing IA64 work.
+
+ svn path=/trunk/mono/; revision=44888
+
+commit e32454dae1a3679056fb4ac86ffc81defc3a5eb7
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Sun May 22 01:29:00 2005 +0000
+
+ 2005-05-22 Zoltan Varga <vargaz@freemail.hu>
+
+ * ia64/ia64-codegen.h: Ongoing IA64 work.
+
+ svn path=/trunk/mono/; revision=44883
+
+commit fee3f0247077513ba3254ddb410687a11c667b8c
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Fri May 20 21:55:37 2005 +0000
+
+ 2005-05-21 Zoltan Varga <vargaz@freemail.hu>
+
+ * ia64/ia64-codegen.h: Ongoing IA64 work.
+
+ svn path=/trunk/mono/; revision=44855
+
+commit 1d94e7499dc18c3882f4aa16e977ceeaacddd466
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Wed May 18 23:02:39 2005 +0000
+
+ 2005-05-19 Zoltan Varga <vargaz@freemail.hu>
+
+ * ia64/ia64-codegen.h ia64/codegen.c: Ongoing ia64 work.
+
+ svn path=/trunk/mono/; revision=44722
+
+commit 3f053b86a49d8c41d47ca2ff771bda64ee5a5ddc
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Wed May 18 18:55:54 2005 +0000
+
+ 2005-05-18 Zoltan Varga <vargaz@freemail.hu>
+
+ * ia64/ia64-codegen.h (ia64_codegen_init): Rename macro parameter.
+
+ svn path=/trunk/mono/; revision=44705
+
+commit 061e9ab4d483c98d6747caad5160bd30fbbf09ab
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Sat May 14 19:52:56 2005 +0000
+
+ 2005-05-14 Zoltan Varga <vargaz@freemail.hu>
+
+ * Makefile.am: Only compile libmonoarch if the interpreter is compiled.
+
+ svn path=/trunk/mono/; revision=44526
+
+commit 82a68f6e85fbc7aaa7832584b2f51953871f1390
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Sat May 14 17:35:42 2005 +0000
+
+ 2005-05-14 Zoltan Varga <vargaz@freemail.hu>
+
+ * ia64/ia64-codegen.h: Add IA64 code generation macros.
+
+ * Makefile.am: Add ia64 subdir.
+
+ svn path=/trunk/mono/; revision=44523
+
+commit 800d43a2433ffc57d904687fdd2b746d5277cab5
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Thu May 5 12:13:33 2005 +0000
+
+ 2005-05-05 Zoltan Varga <vargaz@freemail.hu>
+
+ * alpha/tramp.c: Applied patch from Jakub Bogusz <qboosh@pld-linux.org>.
+
+ svn path=/trunk/mono/; revision=44078
+
+commit 293459dd29bdd85542f499e0530c9504ced01604
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Mon Mar 28 21:09:11 2005 +0000
+
+ 2005-03-28 Zoltan Varga <vargaz@freemail.hu>
+
+ * amd64/amd64-codegen.h: Avoid emitting a rex in some places.
+
+ svn path=/trunk/mono/; revision=42316
+
+commit 140d5636edd892a388da877b7035f1809590e7ff
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Tue Mar 15 19:47:29 2005 +0000
+
+ 2005-03-15 Zoltan Varga <vargaz@freemail.hu>
+
+ * amd64/amd64-codegen.h (amd64_emit_rex): Emit a rex when accessing the
+ byte registers.
+
+ svn path=/trunk/mono/; revision=41848
+
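+ Why a rex is needed here (sketch, predicate name illustrative): with
+ no REX prefix, byte-register encodings 4-7 mean ah/ch/dh/bh; any REX,
+ even the empty 0x40, reinterprets them as spl/bpl/sil/dil.
+
+     static int byte_reg_needs_rex (int reg)
+     {
+             return reg >= 4; /* spl/bpl/sil/dil, and r8b-r15b via REX.B */
+     }
+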
+commit 242ec30220c85e3f69a1dd1d50469771c4ba7047
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Tue Mar 15 17:08:39 2005 +0000
+
+ 2005-03-15 Zoltan Varga <vargaz@freemail.hu>
+
+ * amd64/amd64-codegen.h (AMD64_BYTE_REGS): Add AMD64_BYTE_REGS macro.
+
+ svn path=/trunk/mono/; revision=41842
+
+commit f7074904827b639bb500dcb92c481ec9f35a88a0
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Mon Mar 14 15:17:54 2005 +0000
+
+ 2005-03-14 Zoltan Varga <vargaz@freemail.hu>
+
+ * amd64/amd64-codegen.h: Add missing AMD64_XMM7.
+
+ svn path=/trunk/mono/; revision=41795
+
+commit d23ce2f6ba82d598af825e20b95cf7938ff5bc39
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Sun Mar 13 16:57:42 2005 +0000
+
+ 2005-03-13 Zoltan Varga <vargaz@freemail.hu>
+
+ * amd64/amd64-codegen.h: Remove some unnecessary REXes.
+
+ svn path=/trunk/mono/; revision=41765
+
+commit ad5014de38c4bde6ef12a04bbbcdf0303ac8acc1
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Tue Mar 8 11:11:38 2005 +0000
+
+ 2005-03-08 Zoltan Varga <vargaz@freemail.hu>
+
+ * amd64/amd64-codegen.h (amd64_sse_cvtsi2sd_reg_reg_size): Add _size
+ variants to some sse2 macros.
+
+ svn path=/trunk/mono/; revision=41557
+
+commit ee4c2805588b6d8291ac4349a520ca9c99050b59
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Tue Mar 8 09:28:19 2005 +0000
+
+ 2005-03-08 Zoltan Varga <vargaz@freemail.hu>
+
+ * amd64/amd64-codegen.h (amd64_sse_cvtsd2si_reg_reg): Make this convert
+ to a 64 bit value.
+
+ svn path=/trunk/mono/; revision=41554
+
+commit 3c4a8677815d2ad4e0b47b809ca16b43f33e3f96
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Sun Mar 6 21:25:22 2005 +0000
+
+ 2005-03-06 Zoltan Varga <vargaz@freemail.hu>
+
+ * amd64/amd64-codegen.h: Add some SSE2 instructions.
+
+ svn path=/trunk/mono/; revision=41491
+
+commit b175669d7abc2f7e83940305cf2cb1f7663569b0
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Sun Feb 20 18:48:25 2005 +0000
+
+ 2005-02-20 Zoltan Varga <vargaz@freemail.hu>
+
+ * amd64/amd64-codegen.h: Add xadd instructions.
+
+ svn path=/trunk/mono/; revision=40956
+
+commit c7a5bc7b7055832a36dc63ba67ad7add33a95d06
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Sun Feb 20 14:16:51 2005 +0000
+
+ 2005-02-20 Zoltan Varga <vargaz@freemail.hu>
+
+ * amd64/amd64-codegen.h (amd64_jump_code_size): Do not emit a rex.
+
+ svn path=/trunk/mono/; revision=40934
+
+commit 2cf88a5c39f13e54cc5e5f95ab6021924077c1d8
+Author: Ben Maurer <benm@mono-cvs.ximian.com>
+Date: Wed Feb 16 04:43:00 2005 +0000
+
+ remove .cvsignore, as this is not used anymore
+
+ svn path=/trunk/mono/; revision=40731
+
+commit 0c1ce771e696eabde58e35deb64c0b578be7a92d
+Author: Neale Ferguson <neale@mono-cvs.ximian.com>
+Date: Mon Jan 10 21:13:14 2005 +0000
+
+ - Fix atomic ops on s390
+ - Implement OP_ATOMIC_xxx operations on s390
+ - Standardize exception handling on s390 with other platforms
+ - Enable out of line bblock support
+ - Check vtable slot belongs to domain when building trampoline
+
+ svn path=/trunk/mono/; revision=38647
+
+commit 9f3d964963eac63f42db702fe80cbfa89e3a73b4
+Author: Raja R Harinath <harinath@hurrynot.org>
+Date: Mon Dec 13 06:05:53 2004 +0000
+
+ remove svn:executable from *.cs *.c *.h
+
+ svn path=/trunk/mono/; revision=37682
+
+commit c7b8d172d479d75da8d183f9491e4651bbc5b4f7
+Author: Neale Ferguson <neale@mono-cvs.ximian.com>
+Date: Tue Dec 7 04:18:03 2004 +0000
+
+ Fix atomic operations and add initial support for tls support.
+
+ svn path=/trunk/mono/; revision=37284
+
+commit c523c66bf11c9c05df3d77d42f8be9821ad558e5
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Thu Nov 25 13:32:53 2004 +0000
+
+ 2004-11-25 Zoltan Varga <vargaz@freemail.hu>
+
+ * amd64/amd64-codegen.h: Updates to support the PIC changes.
+
+ svn path=/trunk/mono/; revision=36549
+
+commit da4b0970bffc8f281679bddf7371679910d0a23c
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Fri Nov 19 15:04:41 2004 +0000
+
+ Fri Nov 19 17:29:22 CET 2004 Paolo Molaro <lupus@ximian.com>
+
+ * ppc/ppc-codegen.h: counter reg decrement branch values
+ (patch by Geoff Norton <gnorton@customerdna.com>).
+
+
+ svn path=/trunk/mono/; revision=36320
+
+commit 3e56873e56ee01f0195683a20bd44e0fd03db4ee
+Author: Patrik Torstensson <totte@mono-cvs.ximian.com>
+Date: Thu Nov 18 18:44:57 2004 +0000
+
+ 2004-11-16 Patrik Torstensson <patrik.torstensson@gmail.com>
+
+ * x86/x86-codegen.h: added opcodes for xadd instructions
+
+
+ svn path=/trunk/mono/; revision=36283
+
+commit 59c3726af38156a306a67c2dd6e755e8bdd0d89a
+Author: Neale Ferguson <neale@mono-cvs.ximian.com>
+Date: Wed Nov 17 03:05:28 2004 +0000
+
+ Add support for siginfo_t as a parameter to mono_arch_is_int_overflow. Support this
+ routine in s390.
+
+ svn path=/trunk/mono/; revision=36188
+
+commit 149905478e1af4189a0cd9cf3f0e294dbb2bccbc
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Mon Nov 15 19:00:05 2004 +0000
+
+ 2004-11-15 Zoltan Varga <vargaz@freemail.hu>
+
+ * amd64/x86-64-codegen.h: Get rid of this.
+
+ svn path=/trunk/mono/; revision=36145
+
+commit b982bf7e3e3e98afa37544b4a197d406f00b5e5a
+Author: Ben Maurer <benm@mono-cvs.ximian.com>
+Date: Mon Nov 8 03:19:16 2004 +0000
+
+ fix
+
+ svn path=/trunk/mono/; revision=35803
+
+commit 4c5436f259d4a109ab352f2ec7b7891cdce76cc9
+Author: Ben Maurer <benm@mono-cvs.ximian.com>
+Date: Mon Sep 6 15:07:37 2004 +0000
+
+ fix warning
+
+ svn path=/trunk/mono/; revision=33415
+
+commit 3a8f0a20bd939db788d3fd871b4c0ca37a4d0f96
+Author: Ben Maurer <benm@mono-cvs.ximian.com>
+Date: Wed Sep 1 01:04:04 2004 +0000
+
+ Support short forms of push imm
+
+ svn path=/trunk/mono/; revision=33128
+
+commit e11c33f0ae258eb62dd5fc2e4c6ce12952d25233
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Sun Aug 29 21:04:04 2004 +0000
+
+ 2004-08-30 Zoltan Varga <vargaz@freemail.hu>
+
+ * amd64/amd64-codegen.h (amd64_imul_reg_membase_size): Fix REX
+ generation.
+
+ svn path=/trunk/mono/; revision=33003
+
+commit b0791969d5ddbcb465d86bcd42c86150f653a9a1
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Sun Aug 29 11:11:38 2004 +0000
+
+ 2004-08-29 Zoltan Varga <vargaz@freemail.hu>
+
+ * amd64/amd64-codegen.h: More SSE work.
+
+ svn path=/trunk/mono/; revision=32992
+
+commit 8ca359bb4894521802e1f2044ec55a9aada4c08e
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Sun Aug 29 09:41:22 2004 +0000
+
+ 2004-08-29 Zoltan Varga <vargaz@freemail.hu>
+
+ * amd64/amd64-codegen.h: Add SSE2 instructions.
+
+ svn path=/trunk/mono/; revision=32991
+
+commit 39a59671ff853ab672d9db1c982093ee1c7cc1f8
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Sat Aug 21 20:07:37 2004 +0000
+
+ 2004-08-21 Zoltan Varga <vargaz@freemail.hu>
+
+ * amd64/amd64-codegen.h (X86_IS_BYTE_REG): Redefine X86_IS_BYTE_REG
+ since under amd64, all 16 registers have a low part.
+
+ svn path=/trunk/mono/; revision=32632
+
+commit c6a18db1cda9d62eaba7e1095f34eb84e7c39a8b
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Mon Aug 16 12:58:06 2004 +0000
+
+ 2004-08-16 Zoltan Varga <vargaz@freemail.hu>
+
+ * x86/x86-codegen.h: Add macros for accessing the mod/rm byte.
+
+ svn path=/trunk/mono/; revision=32365
+
+commit 7f2d7df98341055eaf370855c499508599770dec
+Author: Ben Maurer <benm@mono-cvs.ximian.com>
+Date: Sat Aug 14 18:28:26 2004 +0000
+
+ hush cvs
+
+ svn path=/trunk/mono/; revision=32344
+
+commit ee4209b85e88e6adfc07a057b41747607235805c
+Author: Neale Ferguson <neale@mono-cvs.ximian.com>
+Date: Fri Aug 6 16:28:23 2004 +0000
+
+ Support the MEMCPY(base, base) rule and add initial ARGLIST support
+
+ svn path=/trunk/mono/; revision=31985
+
+commit ee8712fd77bdd445d98c511a07f29b5136368201
+Author: Miguel de Icaza <miguel@gnome.org>
+Date: Thu Aug 5 23:28:29 2004 +0000
+
+ Add s390x
+
+ svn path=/trunk/mono/; revision=31966
+
+commit 17467e9a25e9a1cf71c170fd85e042a5a11a0f05
+Author: Neale Ferguson <neale@mono-cvs.ximian.com>
+Date: Wed Aug 4 20:43:11 2004 +0000
+
+ Further 64-bit S/390 updates
+
+ svn path=/trunk/mono/; revision=31898
+
+commit 4ad821169050e70979e71bbd5229557570059139
+Author: Neale Ferguson <neale@mono-cvs.ximian.com>
+Date: Wed Aug 4 02:54:52 2004 +0000
+
+ S/390 64-bit support
+ tailc processing fix for S/390 32-bit
+
+ svn path=/trunk/mono/; revision=31840
+
+commit 5ebecc33aca9878d2071c8766e5741cd6434d676
+Author: Neale Ferguson <neale@mono-cvs.ximian.com>
+Date: Fri Jul 30 23:11:29 2004 +0000
+
+ Add some s390 specific tests
+
+ svn path=/trunk/mono/; revision=31690
+
+commit 4e44c97a16962680e5009c97c0022e10ddbbad30
+Author: Neale Ferguson <neale@mono-cvs.ximian.com>
+Date: Fri Jul 30 18:23:23 2004 +0000
+
+ Optimize code generation macros and standardize
+
+ svn path=/trunk/mono/; revision=31683
+
+commit 57ac232b2805d02a4e2b6322ed9532313337e56c
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Fri Jul 30 16:01:49 2004 +0000
+
+ 2004-07-30 Zoltan Varga <vargaz@freemail.hu>
+
+ * amd64/amd64-codegen.h: Ongoing JIT work.
+
+ svn path=/trunk/mono/; revision=31664
+
+commit 128d13d3973f07f5afba3ac7022bd9a4e7550626
+Author: Ben Maurer <benm@mono-cvs.ximian.com>
+Date: Thu Jul 29 17:10:53 2004 +0000
+
+ 2004-07-29 Ben Maurer <bmaurer@ximian.com>
+
+ * x86/x86-codegen.h: Add opcodes for cmp BYTE PTR [eax], imm
+
+ svn path=/trunk/mono/; revision=31622
+
+commit 77b5d5d9a5c508cef6a93be733818c446b9fe12c
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Wed Jul 28 20:14:03 2004 +0000
+
+ 2004-07-28 Zoltan Varga <vargaz@freemail.hu>
+
+ * amd64/amd64-codegen.h: Ongoing JIT work.
+
+ svn path=/trunk/mono/; revision=31586
+
+commit a451b99d1a51fe3ffa7334ffbe6865f388e549c0
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Sat Jul 24 18:29:32 2004 +0000
+
+ 2004-07-24 Zoltan Varga <vargaz@freemail.hu>
+
+ * amd64/amd64-codegen.h: Ongoing JIT work.
+
+ svn path=/trunk/mono/; revision=31431
+
+commit b58d4fba4fad9c9cd52604adf39ffe578e407b14
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Fri Jul 23 20:05:59 2004 +0000
+
+ 2004-07-23 Zoltan Varga <vargaz@freemail.hu>
+
+ * amd64/amd64-codegen.h: Ongoing JIT work.
+
+ svn path=/trunk/mono/; revision=31426
+
+commit c7d11ced2179a38a406489b57f4a2f317fbe5da3
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Fri Jul 23 16:07:08 2004 +0000
+
+ 2004-07-23 zovarga <vargaz@freemail.hu>
+
+ * amd64/amd64-codegen.h: Ongoing JIT work.
+
+ svn path=/trunk/mono/; revision=31416
+
+commit f69c71790b01b62dd17d4479db005c3ef68e5e38
+Author: Neale Ferguson <neale@mono-cvs.ximian.com>
+Date: Mon Jul 12 23:03:57 2004 +0000
+
+ Add mvcl instruction
+
+ svn path=/trunk/mono/; revision=31055
+
+commit c9c82671d87761dc9a06b78082402924cf8f540d
+Author: Neale Ferguson <neale@mono-cvs.ximian.com>
+Date: Mon Jul 12 12:05:08 2004 +0000
+
+ Add instructions to support enhanced memory-to-memory operations.
+
+ svn path=/trunk/mono/; revision=31039
+
+commit 08a92e1c00c0a0cf3c446257b446939062605260
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Wed Jun 30 15:04:48 2004 +0000
+
+ 2004-06-30 Zoltan Varga <vargaz@freemail.hu>
+
+ * sparc/sparc-codegen.h: Add SPARC64 support.
+
+ svn path=/trunk/mono/; revision=30577
+
+commit d1881ea0cd90053526fa30405f4aeac90e06b485
+Author: Neale Ferguson <neale@mono-cvs.ximian.com>
+Date: Fri Jun 18 20:03:01 2004 +0000
+
+ Fix broken ABI for stack parameters
+
+ svn path=/trunk/mono/; revision=29915
+
+commit 4e0bce5ca726ed3d2a33d6cfdc3b41b04fcb91f8
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Thu Jun 17 16:25:19 2004 +0000
+
+ API cleanup fixes.
+
+ svn path=/trunk/mono/; revision=29787
+
+commit 1ac8bbc10c8f2cff9fe8aef20bee51612aa77f88
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Wed Jun 16 15:24:15 2004 +0000
+
+ Wed Jun 16 18:11:41 CEST 2004 Paolo Molaro <lupus@ximian.com>
+
+ * Makefile.am, *.c, *.h: more API cleanups.
+
+ svn path=/trunk/mono/; revision=29691
+
+commit cf789b0df2ab67298e712242ca201bd01d38c254
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Fri May 21 13:04:55 2004 +0000
+
+ More encoding fixes.
+
+ svn path=/trunk/mono/; revision=27820
+
+commit 47892f7ea09d90ff4385b3f9c3796d5ce80ee76d
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Mon May 10 14:37:42 2004 +0000
+
+ Fix macros.
+
+ svn path=/trunk/mono/; revision=27028
+
+commit e85ff74df8db9dbeaa2f923b2d4b451fd84dcdc0
+Author: Bernie Solomon <bernard@mono-cvs.ximian.com>
+Date: Sat May 8 01:03:26 2004 +0000
+
+ 2004-05-07 Bernie Solomon <bernard@ugsolutions.com>
+
+ * ppc/ppc-codegen.h: remove GCC-ism in ppc_emit32
+
+ svn path=/trunk/mono/; revision=26957
+
+commit f4dcc4e46be455a7a289a969529ba4a1cd0bc3f3
+Author: Neale Ferguson <neale@mono-cvs.ximian.com>
+Date: Fri May 7 19:53:40 2004 +0000
+
+ Bring s390 JIT up to date.
+
+ svn path=/trunk/mono/; revision=26943
+
+commit e79a83571f6126771c5e997560dd7e15c540df3f
+Author: Bernie Solomon <bernard@mono-cvs.ximian.com>
+Date: Fri Apr 30 03:47:45 2004 +0000
+
+ 2004-04-29 Bernie Solomon <bernard@ugsolutions.com>
+
+ * ppc/tramp.c: use sizeof (stackval), fix
+ delegate tramp frame layout for Apple
+
+ svn path=/trunk/mono/; revision=26383
+
+commit f05e6864576c8c9e827cf6affbaff770732628d4
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Thu Apr 29 18:59:24 2004 +0000
+
+ Fix stmw opcode with signed offsets.
+
+ svn path=/trunk/mono/; revision=26328
+
+commit 92e3edf52f04c550767f3ae59c0f7fcefb46cbf8
+Author: Urs C. Muff <urs@mono-cvs.ximian.com>
+Date: Wed Apr 28 03:59:07 2004 +0000
+
+ cleanup
+
+ svn path=/trunk/mono/; revision=26114
+
+commit ab07311f8d1aeb258795fc72c5ed216f603db092
+Author: David Waite <david@alkaline-solutions.com>
+Date: Tue Apr 27 04:13:19 2004 +0000
+
+ 2004-04-26 David Waite <mass@akuma.org>
+
+ * unknown.c: modify to have content for defined platforms (to
+ avoid ISO C warning)
+
+ svn path=/trunk/mono/; revision=26036
+
+commit 9b84c8398a2558c61613ec50d3c3546627ac1e2d
+Author: Raja R Harinath <harinath@hurrynot.org>
+Date: Tue Apr 13 04:31:05 2004 +0000
+
+ ignores
+
+ svn path=/trunk/mono/; revision=25379
+
+commit 8adf42aeb550308e5a30e4308ad639fafa27e7e3
+Author: Bernie Solomon <bernard@mono-cvs.ximian.com>
+Date: Tue Mar 30 01:44:17 2004 +0000
+
+ 2004-03-29 Bernie Solomon <bernard@ugsolutions.com>
+
+ * hppa/hppa-codegen.h:
+ fix displacements in FP instrs
+
+ svn path=/trunk/mono/; revision=24755
+
+commit e82c4f6b16e7d3a7bdabe2df046b7ce17d91e716
+Author: Bernie Solomon <bernard@mono-cvs.ximian.com>
+Date: Tue Mar 30 01:18:11 2004 +0000
+
+ 2004-03-29 Bernie Solomon <bernard@ugsolutions.com>
+
+ * amd64/tramp.c:
+ * arm/tramp.c:
+ * hppa/tramp.c:
+ * ppc/tramp.c:
+ * s390/tramp.c:
+ * sparc/tramp.c:
+ * x86/tramp.c:
+ remove child from MonoInvocation as it isn't used.
+
+ svn path=/trunk/mono/; revision=24751
+
+commit 73296dcd03106668c5db4511948983bdadeaee2f
+Author: Bernie Solomon <bernard@mono-cvs.ximian.com>
+Date: Tue Mar 23 22:01:55 2004 +0000
+
+ 2004-03-23 Bernie Solomon <bernard@ugsolutions.com>
+
+ * hppa/hppa-codegen.h: created
+
+ * hppa/tramp.c: changed style to be more like
+ other platforms.
+
+ * hppa/Makefile.am: add hppa-codegen.h
+
+ svn path=/trunk/mono/; revision=24504
+
+commit 6e46d909fa182adf4051e1a3c07bae63b93a2bc3
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Tue Mar 16 19:22:52 2004 +0000
+
+ 2004-03-16 Zoltan Varga <vargaz@freemail.hu>
+
+ * sparc/sparc-codegen.h: Add v9 branches with prediction.
+
+ svn path=/trunk/mono/; revision=24153
+
+commit 49a337364d8413d2528fe97e68f16ef610bb3c6a
+Author: Miguel de Icaza <miguel@gnome.org>
+Date: Tue Mar 16 16:20:03 2004 +0000
+
+ Add
+
+ svn path=/trunk/mono/; revision=24136
+
+commit ce4b3b024bba2c8bd4d874a75ef7aa23e118abf7
+Author: Miguel de Icaza <miguel@gnome.org>
+Date: Tue Mar 16 16:16:35 2004 +0000
+
+ Rename, since stupid cvs gets confused with the dash in x86-64
+
+ svn path=/trunk/mono/; revision=24134
+
+commit 01dc8bdaddab8f9b1c939716c36d13a35cf2494d
+Author: Miguel de Icaza <miguel@gnome.org>
+Date: Tue Mar 16 16:16:07 2004 +0000
+
+ Added back
+
+ svn path=/trunk/mono/; revision=24133
+
+commit a97ef493bb1e42b3afa548e47e3e14afe028b3ef
+Author: Miguel de Icaza <miguel@gnome.org>
+Date: Tue Mar 16 16:03:49 2004 +0000
+
+ Add x86-64
+
+ svn path=/trunk/mono/; revision=24131
+
+commit 25f79c5f1b26de4e7a413128d37731e1fcf09f14
+Author: Bernie Solomon <bernard@mono-cvs.ximian.com>
+Date: Tue Mar 16 00:02:55 2004 +0000
+
+ 2004-03-15 Bernie Solomon <bernard@ugsolutions.com>
+
+ * sparc/sparc-codegen.h: tweak sparc_mov_reg_reg
+ so Sun's dis command recognizes it.
+
+ svn path=/trunk/mono/; revision=24084
+
+commit 38dd3d4c585c7e9cc116b7dfb5e89356c4d02da2
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Mon Mar 15 17:28:56 2004 +0000
+
+ 2004-03-15 Zoltan Varga <vargaz@freemail.hu>
+
+ * sparc/sparc-codegen.h: Add some v9 instructions.
+
+ svn path=/trunk/mono/; revision=24050
+
+commit 36d64a0bbf92ca51335ddcb87627a8194f601820
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Thu Mar 11 18:23:26 2004 +0000
+
+ 2004-03-11 Zoltan Varga <vargaz@freemail.hu>
+
+ * sparc/sparc-codegen.h: Ongoing sparc work.
+
+ svn path=/trunk/mono/; revision=23926
+
+commit 7e46377b331225994068d848d9ff8ceaeb96d38a
+Author: Duncan Mak <duncan@mono-cvs.ximian.com>
+Date: Mon Mar 8 01:47:03 2004 +0000
+
+ 2004-03-07 Duncan Mak <duncan@ximian.com>
+
+ * Makefile.am: Removed the reference to 'x86-64'. This was the cause
+ of the missing Mono daily tarballs; 'make dist' wasn't working.
+
+ We do have an 'amd64' directory, but it doesn't make it in 'make
+ dist'.
+
+ svn path=/trunk/mono/; revision=23784
+
+commit 94156ea640c77f37c64332acd21adf4170ecb67b
+Author: Miguel de Icaza <miguel@gnome.org>
+Date: Sat Feb 28 15:53:18 2004 +0000
+
+ Add
+
+ svn path=/trunk/mono/; revision=23562
+
+commit c2492eb99fe2c3e148a8dc629cc283fafad7af7c
+Author: Miguel de Icaza <miguel@gnome.org>
+Date: Fri Feb 27 17:03:30 2004 +0000
+
+ Remove amd64
+
+ svn path=/trunk/mono/; revision=23540
+
+commit c58af24e593b96f1ccc7819ab100063aa4db3c54
+Author: Miguel de Icaza <miguel@gnome.org>
+Date: Fri Feb 27 17:03:17 2004 +0000
+
+ Add x86-64 directory
+
+ svn path=/trunk/mono/; revision=23539
+
+commit 7fd6186b66f081ef6c0fca7708ddf8a641a09eae
+Author: Miguel de Icaza <miguel@gnome.org>
+Date: Tue Feb 24 18:01:50 2004 +0000
+
+ Add amd64 support patch from Zalman Stern
+
+ svn path=/trunk/mono/; revision=23411
+
+commit 5d0cafa77c2cd95cb92a2990184bac64ec287016
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Thu Feb 19 14:14:37 2004 +0000
+
+ 2004-02-19 Zoltan Varga <vargaz@freemail.hu>
+
+ * sparc/sparc-codegen.h: Fix lots of opcodes + add new ones.
+
+ svn path=/trunk/mono/; revision=23248
+
+commit f9f3c20b070f92bcf6f85f5bd68a24c3434fe6c4
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Thu Feb 19 14:13:23 2004 +0000
+
+ 2004-02-19 Zoltan Varga <vargaz@freemail.hu>
+
+ * sparc/tramp.c: Fix alignment of structures containing doubles.
+
+ svn path=/trunk/mono/; revision=23247
+
+commit bb16201aaa018434f551c2657d9e38f28dfe8904
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Mon Feb 2 15:56:15 2004 +0000
+
+ 2004-02-02 Zoltan Varga <vargaz@freemail.hu>
+
+ * sparc/tramp.c: Implement all floating point argument passing conventions in
+ Sparc V8. Also fix structure passing in V8.
+
+ svn path=/trunk/mono/; revision=22704
+
+commit 66607f84556593e2c3aa39bba418801193b6fddf
+Author: Miguel de Icaza <miguel@gnome.org>
+Date: Sun Jan 18 18:00:40 2004 +0000
+
+ Apply patches from Neale Ferguson for s390 support
+
+ svn path=/trunk/mono/; revision=22226
+
+commit 963e1b962894e9b434a2e80e63394bd0d34e68b8
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Sat Jan 3 21:42:37 2004 +0000
+
+ Codegen macros for mips.
+
+ svn path=/trunk/mono/; revision=21658
+
+commit 7e4789fdfc87f75e63612fe0aca1f66d76134ba9
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Wed Dec 3 16:48:07 2003 +0000
+
+ Typo fix.
+
+ svn path=/trunk/mono/; revision=20745
+
+commit 96651158bf48aa1c31b5f2e3ca4cbf904211b1dc
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Thu Nov 13 15:23:48 2003 +0000
+
+ Thu Nov 13 16:24:29 CET 2003 Paolo Molaro <lupus@ximian.com>
+
+ * ppc/ppc-codegen.h: fixed most of the incorrect macros from ct.
+
+ svn path=/trunk/mono/; revision=19938
+
+commit ebebe8e4565897dfaad69911c88f4dda134d4b84
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Fri Oct 31 13:03:36 2003 +0000
+
+ 2003-10-31 Zoltan Varga <vargaz@freemail.hu>
+
+ * */tramp.c (mono_create_method_pointer): Rename to
+ mono_arch_create_method_pointer, move common code to a new function in
+ interp.c.
+
+ * */tramp.c (mono_create_trampoline): Rename to
+ mono_arch_create_trampoline for consistency.
+
+ svn path=/trunk/mono/; revision=19500
+
+commit c41c989929efaf77826634392c8ce9c54525809d
+Author: Bernie Solomon <bernard@mono-cvs.ximian.com>
+Date: Tue Oct 14 05:17:17 2003 +0000
+
+ 2003-10-13 Bernie Solomon <bernard@ugsolutions.com>
+
+ * x86/tramp.c: restore EDX after memcpy call
+
+ svn path=/trunk/mono/; revision=19024
+
+commit e4f9a75ed58f5ca214a685041f2a538e2f40fe1f
+Author: Bernie Solomon <bernard@mono-cvs.ximian.com>
+Date: Mon Oct 13 22:56:37 2003 +0000
+
+ 2003-10-13 Bernie Solomon <bernard@ugsolutions.com>
+
+ * Makefile.am: add hppa subdir
+
+ svn path=/trunk/mono/; revision=18999
+
+commit fa30eb232e53c9e39eec1bd44189e8ac29ba1644
+Author: Bernie Solomon <bernard@mono-cvs.ximian.com>
+Date: Mon Oct 13 22:48:11 2003 +0000
+
+ 2003-10-13 Bernie Solomon <bernard@ugsolutions.com>
+
+ * hppa/tramp.c: add initial implementation - this is 64 bit only
+ hppa/Makefile.am hppa/.cvsignore: added
+
+ svn path=/trunk/mono/; revision=18996
+
+commit 0b0945abf1e873f6a8dfb527236d8cce2ce15574
+Author: Bernie Solomon <bernard@mono-cvs.ximian.com>
+Date: Mon Oct 13 22:38:25 2003 +0000
+
+ 2003-10-13 Bernie Solomon <bernard@ugsolutions.com>
+
+ * sparc/sparc-codegen.h sparc/tramp.c: add initial implementation
+ for V9 (64 bit), cover more 32 bit cases as well.
+
+ svn path=/trunk/mono/; revision=18995
+
+commit 6519bafeae686f3b32870a17dc1c84ae90ec95f9
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Wed Sep 3 08:10:57 2003 +0000
+
+ 2003-09-03 Zoltan Varga <vargaz@freemail.hu>
+
+ * x86/tramp.c: Fixes from Bernie Solomon (bernard@ugsolutions.com).
+
+ svn path=/trunk/mono/; revision=17839
+
+commit 935c93eeaff3ad8ccee032ade3584a7f6ab8f4a1
+Author: Ben Maurer <benm@mono-cvs.ximian.com>
+Date: Mon Aug 25 13:38:19 2003 +0000
+
+ .cvsignore update
+
+ svn path=/trunk/mono/; revision=17581
+
+commit 0fed0582997210e2a0ac71a527dbd319a85aebcb
+Author: ct <ct@localhost>
+Date: Sun Aug 24 22:49:45 2003 +0000
+
+ completed the set of floating point ops
+
+ svn path=/trunk/mono/; revision=17564
+
+commit 3d0f6d935e3a9c180d0bbb14fc371d40e53b7872
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Thu Aug 21 15:23:31 2003 +0000
+
+ 2003-08-21 Zoltan Varga <vargaz@freemail.hu>
+
+ * x86/tramp.c: Fixes from Bernie Solomon (bernard@ugsolutions.com).
+
+ svn path=/trunk/mono/; revision=17470
+
+commit ed628ad0776db600fab8d5e4bcd6b563f5e808fd
+Author: ct <ct@localhost>
+Date: Tue Aug 19 03:04:34 2003 +0000
+
+ added more asm macros for floating point subtraction of single/double/quad
+
+ svn path=/trunk/mono/; revision=17394
+
+commit 6260d65a087be486df039c80eba92e44eb7a220d
+Author: ct <ct@localhost>
+Date: Tue Aug 19 02:53:23 2003 +0000
+
+ added floating point instructions for adding double, single, and quad numbers
+
+ svn path=/trunk/mono/; revision=17393
+
+commit c750ad8fea95e1fc81150e516ee26fbe79ab570d
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Thu Aug 7 14:13:05 2003 +0000
+
+ Fixed imm16 range check.
+
+ svn path=/trunk/mono/; revision=17157
+
+commit ebc38557433accd79fce2e38dff0505dfded5691
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Thu Jul 31 14:32:42 2003 +0000
+
+ Thu Jul 31 16:19:07 CEST 2003 Paolo Molaro <lupus@ximian.com>
+
+ * configure.in, etc.: portability fixes and support for
+ building outside the srcdir from Laurent Morichetti <l_m@pacbell.net>.
+
+ svn path=/trunk/mono/; revision=16937
+
+commit 6e851a87092161092c6e8f06f4de13fb45bc04a6
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Tue Jul 1 11:12:47 2003 +0000
+
+ Tue Jul 1 13:03:43 CEST 2003 Paolo Molaro <lupus@ximian.com>
+
+ * alpha/tramp.c: update from Laramie Leavitt (lar@leavitt.us).
+
+ svn path=/trunk/mono/; revision=15809
+
+commit c439e3df5cfa7c67d976258228cb9188a218c21d
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Wed Jun 25 13:18:00 2003 +0000
+
+ FP control word enum.
+
+ svn path=/trunk/mono/; revision=15623
+
+commit 2ad34b0dc225bf0b2efeea63c2f9287a1dbad162
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Mon Jun 9 18:28:54 2003 +0000
+
+ Small updates.
+
+ svn path=/trunk/mono/; revision=15250
+
+commit df86960d595f0284a453fe3fc67687b707148dbf
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Wed May 21 17:57:05 2003 +0000
+
+ Some fixes and more complete support.
+
+ svn path=/trunk/mono/; revision=14769
+
+commit 3af153bd53728da9da9215141b1341d60b447bd3
+Author: Dietmar Maurer <dietmar@mono-cvs.ximian.com>
+Date: Wed May 21 12:45:22 2003 +0000
+
+ 2003-05-21 Dietmar Maurer <dietmar@ximian.com>
+
+ * mini-x86.c (mono_arch_get_allocatable_int_vars): don't allocate
+ I1 to registers because there is no simple way to sign-extend 8-bit
+ quantities in caller-saved registers on x86.
+
+ * inssel-float.brg: set costs of some rules to 2 so
+ that monoburg always selects the arch-specific ones if supplied,
+ regardless of the order we pass the files to monoburg.
+
+ svn path=/trunk/mono/; revision=14757
+
+commit c4eeb3dfdd19546fb0712e5306d8d96a9a07580e
+Author: Dietmar Maurer <dietmar@mono-cvs.ximian.com>
+Date: Tue May 20 10:44:31 2003 +0000
+
+ 2003-05-20 Dietmar Maurer <dietmar@ximian.com>
+
+ * mini-x86.c (mono_arch_get_allocatable_int_vars): allocate 8/16
+ bit values to registers
+
+ svn path=/trunk/mono/; revision=14720
+
+commit 3a48ea89b161b268bb74f013cc36f6aec59e550b
+Author: Malte Hildingson <malte@mono-cvs.ximian.com>
+Date: Thu May 1 23:42:01 2003 +0000
+
+ * tramp.c (mono_create_trampoline): tiny register allocation fix for reference types
+
+ svn path=/trunk/mono/; revision=14195
+
+commit 7595b109642f29ffe0cf8bb3e4411243b92a606f
+Author: Malte Hildingson <malte@mono-cvs.ximian.com>
+Date: Sun Apr 27 16:04:54 2003 +0000
+
+ * tramp.c (alloc_code_buff): posix memory protection.
+ (mono_create_trampoline): new string marshaling + minor fixes.
+ (mono_create_method_pointer): delegates fix.
+
+ svn path=/trunk/mono/; revision=14046
+
+commit dfe276d1e1d116b113a639eecbc14c3661af5462
+Author: Sergey Chaban <serge@mono-cvs.ximian.com>
+Date: Sun Apr 27 14:50:16 2003 +0000
+
+ arm-WMMX.h: initial WirelessMMX support for ARM codegen;
+
+ svn path=/trunk/mono/; revision=14044
+
+commit 27eb0661916c7c65b43def99be92895c61f4d315
+Author: Sergey Chaban <serge@mono-cvs.ximian.com>
+Date: Sun Apr 27 14:47:57 2003 +0000
+
+ * ARM codegen update;
+
+ svn path=/trunk/mono/; revision=14043
+
+commit e1b54daadf68eef0608ac03bd6fe4dc374d78675
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Sun Apr 27 11:40:11 2003 +0000
+
+ Make the debugging output off by default.
+
+ svn path=/trunk/mono/; revision=14039
+
+commit e679a120b848ea9e35e7c8a38ca3e03a386371c7
+Author: Patrik Torstensson <totte@mono-cvs.ximian.com>
+Date: Fri Feb 14 10:01:29 2003 +0000
+
+ 2003-02-14 Patrik Torstensson
+
+ * x86-codegen.h: Added fstsw op code for getting fp flags
+
+ svn path=/trunk/mono/; revision=11577
+
+commit f468e62377dfe3079f5b2bade1f43d239842e381
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Sat Feb 1 10:02:52 2003 +0000
+
+ Sat Feb 1 10:59:31 CET 2003 Paolo Molaro <lupus@ximian.com>
+
+ * alpha/*: update from Laramie.
+
+ svn path=/trunk/mono/; revision=11090
+
+commit cc3953655f65398b40e11fdcc97b1ae47bebfdc1
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Mon Jan 27 11:54:14 2003 +0000
+
+ Mon Jan 27 12:49:10 CET 2003 Paolo Molaro <lupus@ximian.com>
+
+ * alpha/*: start of the port to the alpha architecture by
+ Laramie Leavitt (<lar@leavitt.us>).
+
+ svn path=/trunk/mono/; revision=10942
+
+commit 898dd64bddf69974ae9a22d6aa0ce9625fc9a5a0
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Tue Jan 21 16:33:33 2003 +0000
+
+ Tue Jan 21 17:29:53 CET 2003 Paolo Molaro <lupus@ximian.com>
+
+ * ppc/ppc-codegen.h: completed ppc native code generation by
+ Taylor Christopher P <ct@cs.clemson.edu>.
+
+ svn path=/trunk/mono/; revision=10778
+
+commit d2321af1b58b2fbb84c3b2cf3f6c7c7db0a787a4
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Fri Jan 17 20:17:58 2003 +0000
+
+ Fri Jan 17 21:14:18 CET 2003 Paolo Molaro <lupus@ximian.com>
+
+ * ppc/tramp.c: adapted to work for MacOSX (from a patch by
+ John Duncan).
+
+ svn path=/trunk/mono/; revision=10630
+
+commit 6d1b716753c1cc8a2f5c26338020941aa58ce9d7
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Wed Jan 15 15:21:26 2003 +0000
+
+ Update to the API change of a while ago.
+
+ svn path=/trunk/mono/; revision=10545
+
+commit d4f44103ed442b9a6e221b58b68550c1de4dfa2b
+Author: Mark Crichton <crichton@mono-cvs.ximian.com>
+Date: Mon Nov 11 19:13:08 2002 +0000
+
+ Some debugging stubs.
+
+ svn path=/trunk/mono/; revision=8922
+
+commit b669ce7ac5106466cc6d57e9163ca5d6d80611aa
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Thu Oct 24 19:27:13 2002 +0000
+
+ s390 support from Neale Ferguson <Neale.Ferguson@SoftwareAG-USA.com>.
+
+ svn path=/trunk/mono/; revision=8521
+
+commit 457b666522f839e5e94e5fdda2284255b26d79a2
+Author: Mark Crichton <crichton@mono-cvs.ximian.com>
+Date: Mon Oct 7 03:36:50 2002 +0000
+
+ Fix some minor trampoline nags. Now down to 15 failed tests. Delegate code
+ still broken, if anyone wants to help fix it.
+
+ svn path=/trunk/mono/; revision=8041
+
+commit b6d66c3ac8ae39c47b99dd8b8a7813e6f60c47e7
+Author: Mark Crichton <crichton@mono-cvs.ximian.com>
+Date: Thu Oct 3 15:30:05 2002 +0000
+
+ Changes to tramp.c. Pass more tests.
+
+ svn path=/trunk/mono/; revision=7966
+
+commit e5d299dd18e820d33cf1d74e0e2de53e163cc07b
+Author: Mark Crichton <crichton@mono-cvs.ximian.com>
+Date: Wed Sep 25 04:50:10 2002 +0000
+
+ Stupid off-by-one error fixed.
+
+ The problem was that I incremented gr as if we were on a PPC box. Sparc
+ doesn't need such "alignment" of the registers.
+
+ svn path=/trunk/mono/; revision=7800
+
+commit a9d8f44092c7c313efae893ff64306dc92985110
+Author: Mark Crichton <crichton@mono-cvs.ximian.com>
+Date: Wed Sep 25 01:52:30 2002 +0000
+
+ arch/sparc/tramp.c: Fixed once again. Now works, mostly.
+ io-layer/atomic.h: It's sparc on gcc/solaris, and __sparc__ on gcc/linux.
+ Had to add an #ifdef.
+
+ svn path=/trunk/mono/; revision=7798
+
+commit 0110bf4a5a435c5d60583887e0e0f28b7993a4cf
+Author: Mark Crichton <crichton@mono-cvs.ximian.com>
+Date: Mon Sep 23 02:25:43 2002 +0000
+
+ Starting rewrite of trampolining for SPARC. It needed some cleanup.
+
+ It doesn't work at all now. GO PROGRESS!
+
+ svn path=/trunk/mono/; revision=7728
+
+commit fe7d0f819c55d76f0cb7a54ba66d4368d40385bd
+Author: Mark Crichton <crichton@mono-cvs.ximian.com>
+Date: Thu Sep 19 18:30:56 2002 +0000
+
+ Beginning to add support for Solaris. Tested on Solaris 9.
+
+ Shared handles are still not working, will be addressed soon.
+
+ Trampoline code still broken, expect a rewrite.
+
+ svn path=/trunk/mono/; revision=7622
+
+commit 13eb9f4ebf45ffe17d555458cec8bbecefc71849
+Author: Radek Doulik <rodo@mono-cvs.ximian.com>
+Date: Wed Aug 28 15:26:29 2002 +0000
+
+ retval value type fixed
+
+ svn path=/trunk/mono/; revision=7127
+
+commit 63315827a2ebc424954f4b8baf40497a5600ce7a
+Author: Radek Doulik <rodo@mono-cvs.ximian.com>
+Date: Wed Aug 28 14:41:08 2002 +0000
+
+ fixed valuetypes marshaling in delegates
+
+ svn path=/trunk/mono/; revision=7126
+
+commit 82d4a3ff22ea8e8dfb9a3ec2be10657e7e25cd97
+Author: Radek Doulik <rodo@mono-cvs.ximian.com>
+Date: Sat Aug 24 23:54:12 2002 +0000
+
+ fixed struct marshaling, 108 tests pass now
+
+ svn path=/trunk/mono/; revision=7013
+
+commit b94511c33193dc728e039fa776bf3b9d5dad4e5b
+Author: Radek Doulik <rodo@mono-cvs.ximian.com>
+Date: Wed Aug 21 17:47:34 2002 +0000
+
+ fixed delegates
+
+ svn path=/trunk/mono/; revision=6862
+
+commit fafa1892b8b0315cab29de09f09f2aa5041b61a7
+Author: Mark Crichton <crichton@mono-cvs.ximian.com>
+Date: Tue Aug 20 15:03:07 2002 +0000
+
+ This nearly completes SPARC trampoline support for mint/mono. The delegate
+ code still needs some work.
+
+ There are bugs. Send crash reports, as well as .cs code and exe's to
+ crichton@gimp.org
+
+ Also, if anyone gets Bus Errors in the code, let me know; I've been
+ hunting down alignment bugs as well.
+
+ svn path=/trunk/mono/; revision=6812
+
+commit f8f8b65c484f48436941e4985cfb4b837cff4ceb
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Mon Aug 5 17:28:10 2002 +0000
+
+ Mon Aug 5 19:21:19 CEST 2002 Paolo Molaro <lupus@ximian.com>
+
+ * x86/tramp.c: fix random memory read in mono_create_method_pointer.
+
+ svn path=/trunk/mono/; revision=6436
+
+commit dc11862f43a6240bcc35d2ef96fb04750c4bf930
+Author: Sergey Chaban <serge@mono-cvs.ximian.com>
+Date: Mon Aug 5 16:43:06 2002 +0000
+
+ x86-codegen.h: fixed bug in x86_memindex_emit, for basereg == EBP && disp == imm32;
+
+ svn path=/trunk/mono/; revision=6433
+
+commit 60179dd8c27bf3c080ca2c7db818c01a51c9d4b1
+Author: Dietmar Maurer <dietmar@mono-cvs.ximian.com>
+Date: Mon Aug 5 09:53:43 2002 +0000
+
+ 2002-08-05 Dietmar Maurer <dietmar@ximian.com>
+
+ * x86/tramp.c (mono_create_trampoline): fixed stack_size bug
+
+ svn path=/trunk/mono/; revision=6408
+
+commit e13f4a98c6fe61ec768b0da9d8832814a313ed78
+Author: Radek Doulik <rodo@mono-cvs.ximian.com>
+Date: Fri Aug 2 18:34:20 2002 +0000
+
+ more WIP
+
+ svn path=/trunk/mono/; revision=6363
+
+commit f73afba7e99de872e4e9d9dcf3c7c483632f6bc6
+Author: Radek Doulik <rodo@mono-cvs.ximian.com>
+Date: Fri Aug 2 18:13:59 2002 +0000
+
+ more surgery
+
+ svn path=/trunk/mono/; revision=6360
+
+commit 347f6a854167fa5a26484b83736de86f5ffd8ea0
+Author: Radek Doulik <rodo@mono-cvs.ximian.com>
+Date: Fri Aug 2 17:55:44 2002 +0000
+
+ did quick surgery to update for Dietmar's new code
+
+ svn path=/trunk/mono/; revision=6359
+
+commit cc4396df6db395836340d26ad2f2d920f946729f
+Author: Dietmar Maurer <dietmar@mono-cvs.ximian.com>
+Date: Fri Aug 2 07:13:54 2002 +0000
+
+ 2002-08-02 Dietmar Maurer <dietmar@ximian.com>
+
+ * marshal.c (mono_delegate_to_ftnptr): pass delegate->target
+ instead of the delegate itself as this pointer (bug #28383)
+
+ svn path=/trunk/mono/; revision=6348
+
+commit fbb833e1937ec3e3183bd1219e0f2391faa62718
+Author: Dietmar Maurer <dietmar@mono-cvs.ximian.com>
+Date: Thu Aug 1 14:17:18 2002 +0000
+
+ 2002-08-01 Dietmar Maurer <dietmar@ximian.com>
+
+ * x86/tramp.c (mono_create_trampoline): also push the value type pointer for
+ methods returning value types.
+ (mono_create_method_pointer): support valuetype returns.
+
+ * interp.c (ves_pinvoke_method): do not call stackval_from_data if the result
+ is a value type.
+
+ svn path=/trunk/mono/; revision=6311
+
+commit 27a4251f2a6fd091ddc8084ad14a8808c136431d
+Author: Dietmar Maurer <dietmar@mono-cvs.ximian.com>
+Date: Thu Aug 1 06:40:11 2002 +0000
+
+ 2002-08-01 Dietmar Maurer <dietmar@ximian.com>
+
+ * interp.c (stackval_from_data): add pinvoke argument
+ (stackval_to_data): add pinvoke argument. We need to consider the
+ fact that unmanaged structures may have different sizes.
+
+ * x86/tramp.c (mono_create_method_pointer): allocate space for
+ value types.
+
+ svn path=/trunk/mono/; revision=6308
+
+commit 1be0ee94a17d2a4b7edb513d845d88ba5fed8285
+Author: Dietmar Maurer <dietmar@mono-cvs.ximian.com>
+Date: Wed Jul 31 11:53:19 2002 +0000
+
+ 2002-07-31 Dietmar Maurer <dietmar@ximian.com>
+
+ * x86/tramp.c: (mono_create_method_pointer): return method->addr for pinvoke methods
+
+ * interp.c (ves_exec_method): bug fix - directly jump to handle_exception.
+
+ svn path=/trunk/mono/; revision=6280
+
+commit 87f9fd554284e9d2037c8757a4211cf710a85ac0
+Author: Dietmar Maurer <dietmar@mono-cvs.ximian.com>
+Date: Wed Jul 31 11:00:53 2002 +0000
+
+ 2002-07-31 Dietmar Maurer <dietmar@ximian.com>
+
+ * interp.c: use the new marshaling code. better delegate/remoting
+ support.
+
+ * debug-helpers.c (mono_method_full_name): only print a number to
+ indicate wrapper type (so that the output is more readable in traces).
+
+ * x86/tramp.c: remove code to handle PInvoke because this is no
+ longer needed.
+
+ svn path=/trunk/mono/; revision=6278
+
+commit ebf4ad275e84a3887798ac765bdc1f0ed457cd5a
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Fri Jul 19 12:21:01 2002 +0000
+
+ Fri Jul 19 14:18:36 CEST 2002 Paolo Molaro <lupus@ximian.com>
+
+ * x86/tramp.c: fix float loads. Simple delegate marshaling fix.
+
+ svn path=/trunk/mono/; revision=5909
+
+commit 2b677a332d7e811ca9cc75d271d069787f0495c1
+Author: Radek Doulik <rodo@mono-cvs.ximian.com>
+Date: Mon Jul 8 16:13:36 2002 +0000
+
+ 2002-07-08 Radek Doulik <rodo@ximian.com>
+
+ * ppc/tramp.c: marshaling for SZARRAY
+
+ svn path=/trunk/mono/; revision=5650
+
+commit ef9afb744f4679c465be380b4285928fff50db5e
+Author: Radek Doulik <rodo@mono-cvs.ximian.com>
+Date: Sat Jul 6 01:41:14 2002 +0000
+
+ 2002-07-05 Radek Doulik <rodo@ximian.com>
+
+ * ppc/tramp.c: removed magic hack
+
+ svn path=/trunk/mono/; revision=5614
+
+commit 02476784232f22f91e347750c3fb8018d770d057
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Tue Jun 18 04:38:23 2002 +0000
+
+ Tue Jun 18 10:21:56 CEST 2002 Paolo Molaro <lupus@ximian.com>
+
+ * x86/tramp.c: marshal simple arrays correctly.
+
+ svn path=/trunk/mono/; revision=5316
+
+commit 5ff6eebba3bc5e1662b84a34a276d6842e41ab87
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Sat Jun 1 08:08:34 2002 +0000
+
+ Kill warning.
+
+ svn path=/trunk/mono/; revision=5075
+
+commit 0c268fdddc804751bba57401c02b139368f7a01c
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Fri May 31 10:55:37 2002 +0000
+
+ Compilation fixes.
+
+ svn path=/trunk/mono/; revision=5054
+
+commit 9fe623bf5c85da9328f895680d8688987a94427e
+Author: Dietmar Maurer <dietmar@mono-cvs.ximian.com>
+Date: Thu May 30 11:04:53 2002 +0000
+
+ 2002-05-30 Dietmar Maurer <dietmar@ximian.com>
+
+ * x86.brg (reg): bug fix in LOCALLOC
+
+ * mono.c (main): new switch --nointrinsic to disable memcpy opt.
+
+ * x86.brg: added block copy/init optimizations from
+ Serge (serge@wildwestsoftware.com)
+
+ svn path=/trunk/mono/; revision=5025
+
+commit 1b8d1ed7ce3e489dcf53cc2369a3d6d482d5901d
+Author: Dietmar Maurer <dietmar@mono-cvs.ximian.com>
+Date: Tue May 28 12:23:00 2002 +0000
+
+ 2002-05-28 Dietmar Maurer <dietmar@ximian.com>
+
+ * x86.brg: impl. CKFINITE
+
+ svn path=/trunk/mono/; revision=4988
+
+commit b0826d366f4f32c6ef772c0a9deef5a9b4157f0b
+Author: Miguel de Icaza <miguel@gnome.org>
+Date: Mon May 27 22:56:15 2002 +0000
+
+ Updated copyright headers to the standard template
+
+ svn path=/trunk/mono/; revision=4975
+
+commit 027755140cf39776018e520f7cd838e319fb9a34
+Author: Dietmar Maurer <dietmar@mono-cvs.ximian.com>
+Date: Thu May 23 07:44:00 2002 +0000
+
+ 2002-05-23 Dietmar Maurer <dietmar@ximian.com>
+
+ * delegate.c: move the thread pool to metadata/threadpool.c, code
+ cleanup.
+
+ * threadpool.[ch]: impl. a threadpool that can
+ be used by mint and mono.
+
+ svn path=/trunk/mono/; revision=4875
+
+commit be70e94a20c2c1864f829122085bce03f24cc4e8
+Author: Radek Doulik <rodo@mono-cvs.ximian.com>
+Date: Wed May 15 14:19:24 2002 +0000
+
+ fixed delegates return values
+
+ svn path=/trunk/mono/; revision=4662
+
+commit 89d436d12d5746d04d9f27d9897853f846d0500e
+Author: Radek Doulik <rodo@mono-cvs.ximian.com>
+Date: Mon May 13 19:00:42 2002 +0000
+
+ 2002-05-13 Radek Doulik <rodo@ximian.com>
+
+ * ppc/tramp.c (emit_save_parameters): fix I8 parameters
+
+ svn path=/trunk/mono/; revision=4601
+
+commit 8e8d0cf9ac1f4aa46da775bed8da214581345ddb
+Author: Radek Doulik <rodo@mono-cvs.ximian.com>
+Date: Mon May 13 17:24:04 2002 +0000
+
+ introduced DEBUG, disabled by default
+
+ svn path=/trunk/mono/; revision=4599
+
+commit 8d20a830d50aaf3f30869283332d654472f16890
+Author: Sergey Chaban <serge@mono-cvs.ximian.com>
+Date: Fri May 10 19:25:15 2002 +0000
+
+ * x86-codegen.h: renamed FP int macro for consistency (its arg is really a membase, not mem);
+
+ svn path=/trunk/mono/; revision=4500
+
+commit 9fb095d7866ee9963f11e3bd2dcc9b9930320ddc
+Author: Radek Doulik <rodo@mono-cvs.ximian.com>
+Date: Fri May 10 13:39:09 2002 +0000
+
+ updated for new strings
+
+ svn path=/trunk/mono/; revision=4484
+
+commit 5d0a1992c7fe0252457f6644198654d06ee7a19f
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Fri May 10 07:24:08 2002 +0000
+
+ Fix checks in x86_patch().
+
+ svn path=/trunk/mono/; revision=4473
+
+commit 512203d918c6998f9652d23301b553c2bb205788
+Author: Sergey Chaban <serge@mono-cvs.ximian.com>
+Date: Mon May 6 16:39:01 2002 +0000
+
+ Logged changes to x86-codegen.h
+
+ svn path=/trunk/mono/; revision=4344
+
+commit 9d1e2b5076d08bd02eb28ad8b3f2a27a42449250
+Author: Sergey Chaban <serge@mono-cvs.ximian.com>
+Date: Mon May 6 16:33:54 2002 +0000
+
+ * x86-codegen.h: added missing shifts;
+ 8-bit ALU operations;
+ FPU ops with integer operand;
+ FIST (without pop);
+
+ svn path=/trunk/mono/; revision=4343
+
+commit 944736b70eb0689f094fe05c7184d36f7b7421bf
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Fri May 3 12:52:19 2002 +0000
+
+ Added some missing FP opcodes and made x86_patch() also handle the call opcode.
+
+ svn path=/trunk/mono/; revision=4252
+
+commit d8cf0bf0270efb923d7c6e80c4e5d547d1161740
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Mon Apr 29 12:14:39 2002 +0000
+
+ Removed mono_string_new_wrapper().
+
+ svn path=/trunk/mono/; revision=4151
+
+commit cc03dca33b721c5b46cba47ff7a7bb80b820be6d
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Mon Apr 22 07:32:11 2002 +0000
+
+ Mon Apr 22 12:57:31 CEST 2002 Paolo Molaro <lupus@ximian.com>
+
+ * x86/x86-codegen.h: added loop instructions and made x86_patch fully
+ useful.
+
+ svn path=/trunk/mono/; revision=3950
+
+commit ab877e78de2c3ac01664dc13c13c2f231fca4c11
+Author: Dietmar Maurer <dietmar@mono-cvs.ximian.com>
+Date: Sat Apr 20 14:32:46 2002 +0000
+
+ 2002-04-20 Dietmar Maurer <dietmar@ximian.com>
+
+ * interp.c (ves_exec_method): support internalcall String constructors
+
+ svn path=/trunk/mono/; revision=3925
+
+commit d4ccb473cf835fd07294b7da6a6d4da9e2022dcd
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Wed Apr 10 12:34:16 2002 +0000
+
+ Forgot to commit.
+
+ svn path=/trunk/mono/; revision=3740
+
+commit 9116ce23467ea863a99b860849d867802c32187a
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Sat Apr 6 10:40:58 2002 +0000
+
+ Sat Apr 6 16:29:40 CEST 2002 Paolo Molaro <lupus@ximian.com>
+
+ * x86/tramp.c: fix advancement of argument position on the stack.
+
+ svn path=/trunk/mono/; revision=3652
+
+commit bf0fa05ecc5f3537597c10704414544c50d3a0ed
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Thu Apr 4 04:42:46 2002 +0000
+
+ Remove useless comments in rules.
+
+ svn path=/trunk/mono/; revision=3595
+
+commit 3f3f1e23c3cced2e37ec49361ee3236c524ed107
+Author: Dietmar Maurer <dietmar@mono-cvs.ximian.com>
+Date: Sat Mar 30 11:19:26 2002 +0000
+
+ fixed compiler warnings
+
+ svn path=/trunk/mono/; revision=3514
+
+commit 793cfcbae98d4847ff08aff44ffa27020260c317
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Sat Mar 16 14:37:28 2002 +0000
+
+ Sat Mar 16 19:12:57 CET 2002 Paolo Molaro <lupus@ximian.com>
+
+ * x86/tramp.c: increase default allocated size for trampolines
+ and assert on overflow.
+
+ svn path=/trunk/mono/; revision=3143
+
+commit af361d9d30702937e3cd9412b987552f4652887a
+Author: Dietmar Maurer <dietmar@mono-cvs.ximian.com>
+Date: Thu Mar 14 09:52:53 2002 +0000
+
+ 2002-03-14 Dietmar Maurer <dietmar@ximian.com>
+
+ * emit-x86.c (arch_create_native_wrapper): new code to generate
+ wrappers for calling native functions.
+
+ * icall.c (ves_icall_InternalInvoke): impl.
+
+ svn path=/trunk/mono/; revision=3103
+
+commit 670be867554bb6f1ed61a17649e21d0e25f66105
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Mon Mar 11 11:24:33 2002 +0000
+
+ Mon Mar 11 16:14:29 CET 2002 Paolo Molaro <lupus@ximian.com>
+
+ * x86/x86-codegen.h: added x86_clear_reg() and changed
+ x86_mov_reg_imm() to not check for imm == 0.
+
+ svn path=/trunk/mono/; revision=3051
+
+commit 51d24bbb570af055b885dfe9f06e7717e4bb3b98
+Author: Dietmar Maurer <dietmar@mono-cvs.ximian.com>
+Date: Thu Feb 28 09:35:29 2002 +0000
+
+ impl. more CONV opcodes
+
+ svn path=/trunk/mono/; revision=2761
+
+commit d0370e0ab841b63f60170f3afcae9ee49e9faade
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Thu Feb 28 07:43:49 2002 +0000
+
+ Thu Feb 28 12:34:21 CET 2002 Paolo Molaro <lupus@ximian.com>
+
+ * x86/tramp.c: start handling of more complex marshaling stuff.
+
+
+ Thu Feb 28 12:33:41 CET 2002 Paolo Molaro <lupus@ximian.com>
+
+ * marshal.c, marshal.h: start of marshaling interface.
+
+ svn path=/trunk/mono/; revision=2759
+
+commit 29f73f5799fb9274a44c918cb4f63c606f765b96
+Author: Sergey Chaban <serge@mono-cvs.ximian.com>
+Date: Wed Feb 27 09:12:27 2002 +0000
+
+ * Makefile.am: removed SCRIPT_SOURCES to fix automake issues.
+
+ svn path=/trunk/mono/; revision=2710
+
+commit a8b6a875977b2728019ea7cf2ea8dd432fe4469a
+Author: Sergey Chaban <serge@mono-cvs.ximian.com>
+Date: Mon Feb 25 08:58:43 2002 +0000
+
+ * ChangeLog: ARM-related log entry.
+
+ svn path=/trunk/mono/; revision=2628
+
+commit f703ca24db3d380b37434e9f1cced6d0b45a5470
+Author: Sergey Chaban <serge@mono-cvs.ximian.com>
+Date: Mon Feb 25 08:56:57 2002 +0000
+
+ * Makefile.am: added arm to DIST_SUBDIRS.
+
+ svn path=/trunk/mono/; revision=2627
+
+commit f107fb14e6c183972bec81e5727381f44c6a5333
+Author: Radek Doulik <rodo@mono-cvs.ximian.com>
+Date: Sun Feb 24 20:46:13 2002 +0000
+
+ (mono_create_method_pointer): implements delegates with parameters
+ and return value
+
+ svn path=/trunk/mono/; revision=2618
+
+commit 2217d1a7da2572afd033b958454b9662c42022b9
+Author: Sergey Chaban <serge@mono-cvs.ximian.com>
+Date: Sun Feb 24 17:44:55 2002 +0000
+
+ * ARM support sources, initial check-in;
+
+ svn path=/trunk/mono/; revision=2615
+
+commit 56dde5e20e11f2d9d2a3522923a5a4729bed469f
+Author: Radek Doulik <rodo@mono-cvs.ximian.com>
+Date: Sun Feb 24 01:40:17 2002 +0000
+
+ 2002-02-24 Radek Doulik <rodo@ximian.com>
+
+ * ppc/tramp.c (mono_create_method_pointer): basic delegates
+ implementation; it works for simple delegates now and I am already
+ pretty close to having it working for all delegates, but I am
+ going to sleep and finish it tomorrow?
+
+ svn path=/trunk/mono/; revision=2611
+
+commit 0c4f3b00c8e831077c6ba1b28065e7be81bbff61
+Author: Jeffrey Stedfast <fejj@novell.com>
+Date: Fri Feb 22 19:43:09 2002 +0000
+
+ 2002-02-22 Jeffrey Stedfast <fejj@ximian.com>
+
+ * sparc/tramp.c (mono_create_trampoline): Much tinkering to get
+ the opcodes more correct. Still needs a lot of work.
+
+ svn path=/trunk/mono/; revision=2602
+
+commit 6bb3f7ead4ab8d574273f5bdacf32b29809ace80
+Author: Radek Doulik <rodo@mono-cvs.ximian.com>
+Date: Tue Feb 19 20:57:29 2002 +0000
+
+ oops, fix return value passing
+
+ svn path=/trunk/mono/; revision=2526
+
+commit 725e90ef0e13752e357358ddef152a30beae174f
+Author: Radek Doulik <rodo@mono-cvs.ximian.com>
+Date: Tue Feb 19 20:50:13 2002 +0000
+
+ added stack saving for most arguments
+
+ svn path=/trunk/mono/; revision=2523
+
+commit 5dbc4bd3639f2d012a1103ae1b0f911768e460ab
+Author: Radek Doulik <rodo@mono-cvs.ximian.com>
+Date: Tue Feb 19 19:49:10 2002 +0000
+
+ 2002-02-19 Radek Doulik <rodo@ximian.com>
+
+ * ppc/tramp.c (emit_save_parameters): don't start saving 64bit
+ values to
+ even registers
+
+ svn path=/trunk/mono/; revision=2519
+
+commit e756cc154586ebdd6f4bba8b730fca09611874cf
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Tue Feb 19 15:40:57 2002 +0000
+
+ Tue Feb 19 20:19:38 CET 2002 Paolo Molaro <lupus@ximian.com>
+
+ * x86/tramp.c: avoid pointer arithmetic (pointed out by Serge).
+
+
+ Tue Feb 19 20:20:15 CET 2002 Paolo Molaro <lupus@ximian.com>
+
+ * dump.c: the prolog is before each arg in the custom attribute blob.
+
+ svn path=/trunk/mono/; revision=2513
+
+commit 1da21d342a98bedfc9295846080043d8946f4029
+Author: Radek Doulik <rodo@mono-cvs.ximian.com>
+Date: Sun Feb 17 21:10:29 2002 +0000
+
+ la la la, ChangeLog entries
+
+ svn path=/trunk/mono/; revision=2463
+
+commit b7fa0baa6c15d3ee14a1b67dd5b56d21a931894b
+Author: Radek Doulik <rodo@mono-cvs.ximian.com>
+Date: Sun Feb 17 20:02:39 2002 +0000
+
+ (mono_string_new_wrapper): new helper function, cut&pasted from
+ x86, modified to check for NULL text to avoid branching in
+ generated code
+ (calculate_sizes): updated for string retval changes
+ (emit_call_and_store_retval): updated for string retval
+
+ svn path=/trunk/mono/; revision=2461
+
+commit 2cee2566ae50aa32e13864135260e16fd21bfac1
+Author: Radek Doulik <rodo@mono-cvs.ximian.com>
+Date: Sun Feb 17 19:41:12 2002 +0000
+
+ 2002-02-17 Radek Doulik <rodo@ximian.com>
+
+ * ppc/tramp.c: fixed minimal stack size, fixed string parameters,
+ fixed byte and half word parameters
+
+ * ppc/ppc-codegen.h (ppc_mr): added lhz, lbz, sth
+
+ svn path=/trunk/mono/; revision=2460
+
+commit c6fd0cb7010239a29091a50aa5354e96f74bedf2
+Author: Dietmar Maurer <dietmar@mono-cvs.ximian.com>
+Date: Wed Feb 13 12:22:52 2002 +0000
+
+ added some docu
+
+ svn path=/trunk/mono/; revision=2372
+
+commit 6b6716c9eaa66549c9c1cf86934a54a830afc1b6
+Author: Dietmar Maurer <dietmar@mono-cvs.ximian.com>
+Date: Wed Feb 13 08:29:02 2002 +0000
+
+ pass the domain to mono_string_new
+
+ svn path=/trunk/mono/; revision=2365
+
+commit 0ffc7e417ee15973120c4f3a0cb0f2732c5c6633
+Author: Miguel de Icaza <miguel@gnome.org>
+Date: Mon Feb 11 22:48:46 2002 +0000
+
+ More
+
+ svn path=/trunk/mono/; revision=2341
+
+commit 6f7cdfa857058ee3662e1662190315c294188ae0
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Mon Feb 11 13:49:06 2002 +0000
+
+ Mon Feb 11 18:40:04 CET 2002 Paolo Molaro <lupus@ximian.com>
+
+ * sparc/*: sparc codegen header and some untested trampoline code.
+
+ svn path=/trunk/mono/; revision=2315
+
+commit d7a858a6ac5bc37435a157cf41eb63818905a7ea
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Mon Feb 11 07:42:10 2002 +0000
+
+ Mon Feb 11 12:32:35 CET 2002 Paolo Molaro <lupus@ximian.com>
+
+ * x86/tramp.c: fix handling of multiple marshaled strings.
+ * x86/x86-codegen.h: some code to patch branch displacements.
+
+ svn path=/trunk/mono/; revision=2308
+
+commit dd029fa4245c99073ae6863dcb8e1560cc1eedc0
+Author: Dietmar Maurer <dietmar@mono-cvs.ximian.com>
+Date: Fri Feb 1 12:04:34 2002 +0000
+
+ SHR/SHL impl.
+
+ svn path=/trunk/mono/; revision=2224
+
+commit 4a977a50d70eb75760d9555854845d32595c4093
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Fri Feb 1 11:22:35 2002 +0000
+
+ Fri Feb 1 16:03:53 CET 2002 Paolo Molaro <lupus@ximian.com>
+
+ * interp.c: exception fixes. Use mono_method_pointer_get ()
+ to ease porting to other archs. Some support for overflow detection.
+
+ Fri Feb 1 16:03:00 CET 2002 Paolo Molaro <lupus@ximian.com>
+
+ * x86/tramp.c, ppc/tramp.c: implement mono_method_pointer_get ().
+
+
+ Fri Feb 1 16:13:20 CET 2002 Paolo Molaro <lupus@ximian.com>
+
+ * class.c: add asserts if we are ever going to scribble over memory.
+ * socket-io.c: not all systems have AF_IRDA defined.
+
+ svn path=/trunk/mono/; revision=2223
+
+commit 2d3dbc6213f3e12d1c7b332d80fec81384612bf8
+Author: Miguel de Icaza <miguel@gnome.org>
+Date: Thu Jan 24 01:00:53 2002 +0000
+
+ 2002-01-23 Miguel de Icaza <miguel@ximian.com>
+
+ * x86/tramp.c (mono_create_trampoline): Do not try to create a
+ mono_string_new if the return value from the PInvoke code is
+ NULL.
+
+ 2002-01-23 Miguel de Icaza <miguel@ximian.com>
+
+ * genwrapper.pl: Added wrappers for the mono_glob functions.
+
+ * glob.c: New file, with globing functions used by the Directory
+ code.
+
+ svn path=/trunk/mono/; revision=2139
+
+commit 5291c24b937d193ef9861c87421bab87e0fcc4da
+Author: Radek Doulik <rodo@mono-cvs.ximian.com>
+Date: Mon Jan 21 20:06:20 2002 +0000
+
+ ppc changes
+
+ svn path=/trunk/mono/; revision=2090
+
+commit b5472227702fc528149111f0c4406c9dadb9a9e0
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Mon Jan 14 07:00:24 2002 +0000
+
+ Mon Jan 14 11:50:16 CET 2002 Paolo Molaro <lupus@ximian.com>
+
+ * x86/x86-codegen.c: added overflow condition code and some aliases
+ for the other ccs.
+
+ svn path=/trunk/mono/; revision=1968
+
+commit a18abcd00665e9bc660b90cf4c0bdf86456067af
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Thu Jan 10 16:13:26 2002 +0000
+
+ Thu Jan 10 19:36:27 CET 2002 Paolo Molaro <lupus@ximian.com>
+
+ * class.c: fix mono_class_from_mono_type () for szarray types.
+ Remove unused cache check in mono_class_from_type_spec().
+ * icall.c: *type_from_name () functions handle simple arrays and byref.
+ * reflection.c: handle byref and szarray types. Handle methods without
+ body (gets P/Invoke compilation working). Handle types and fields in
+ get_token ().
+ * reflection.h: add rank to MonoTypeInfo.
+
+
+ Thu Jan 10 20:59:59 CET 2002 Paolo Molaro <lupus@ximian.com>
+
+ * interp.c, interp.h: add a flag to mono_create_trampoline ()
+ to handle runtime methods.
+
+
+ Thu Jan 10 21:01:08 CET 2002 Paolo Molaro <lupus@ximian.com>
+
+ * x86/tramp.c: mono_create_trampoline (): the runtime argument is
+ needed to handle delegates correctly; the previous change in handling
+ the string return type broke them.
+
+ svn path=/trunk/mono/; revision=1950
+
+commit 66990d65e3ac907fe24cc5411591759ce60472b0
+Author: Matt Kimball <mkimball@mono-cvs.ximian.com>
+Date: Wed Jan 9 01:49:12 2002 +0000
+
+ Tue Jan 8 22:38:41 MST 2002 Matt Kimball <matt@kimball.net>
+
+ * x86/tramp.c: handle strings returned from functions in external
+ libraries by converting to a Mono string object after the pinvoke'd
+ function returns
+
+ svn path=/trunk/mono/; revision=1923
+
+commit ba9f9e77bf38e3bb4b1a888d39c7b0aab8ae09bf
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Sat Jan 5 11:15:42 2002 +0000
+
+ Sat Jan 5 15:48:04 CET 2002 Paolo Molaro <lupus@ximian.com>
+
+ * icall.c: hack to make IsSubType work for TypeBuilders.
+ * reflection.c: emit constructors before methods.
+ Retrieve param names in mono_param_get_objects().
+
+
+ Sat Jan 5 15:45:14 CET 2002 Paolo Molaro <lupus@ximian.com>
+
+ * interp.c: allow classname:method name in --debug argument.
+ Fix box opcode for valuetypes. Fix a few opcodes to take a 16-bit
+ index instead of 32 (stloc, ldloc, starg, etc.).
+
+
+ Sat Jan 5 15:51:06 CET 2002 Paolo Molaro <lupus@ximian.com>
+
+ * x86/tramp.c: handle short integer return types.
+
+ svn path=/trunk/mono/; revision=1852
+
+commit 0635ffef0b38bcf88cd3320939c1d96bf8bb8c0e
+Author: Miguel de Icaza <miguel@gnome.org>
+Date: Thu Jan 3 20:13:47 2002 +0000
+
+ Fix build for new automakes, seems to work
+
+ svn path=/trunk/mono/; revision=1795
+
+commit 054ebda213a85e3a8a1770ec5e63831e3a0f06ba
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Thu Dec 20 15:20:42 2001 +0000
+
+ Thu Dec 20 20:13:07 CET 2001 Paolo Molaro <lupus@ximian.com>
+
+ * x86/tramp.c: fix create_method_pointer() to pass the arguments
+ correctly and add check for overflow.
+
+ svn path=/trunk/mono/; revision=1656
+
+commit faaadc7132a2cdd8c13adf7fbb79d32461759493
+Author: Dietmar Maurer <dietmar@mono-cvs.ximian.com>
+Date: Mon Dec 17 06:50:02 2001 +0000
+
+ 2001-12-16 Dietmar Maurer <dietmar@ximian.com>
+
+ * emit-x86.c (arch_handle_exception): new code to handle
+ exceptions inside unmanaged code.
+
+ * x86.brg: impl. SAVE_LMF, RESTORE_LMF, pass implicit valuetype
+ address as first argument.
+
+ * x86.brg: pass exceptions on the stack
+
+ * jit.h (ISSTRUCT): new macro to check for real value types
+ (return false for enum types).
+
+ * unicode.c (_wapi_unicode_to_utf8): byteswap UTF16 strings before
+ passing them to iconv
+
+ * file-io.c: raise exceptions if handle is invalid.
+
+ svn path=/trunk/mono/; revision=1603
+
+commit 35430229b14448182d84a7f9348995019251fb28
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Thu Dec 13 11:03:21 2001 +0000
+
+ Thu Dec 13 15:56:53 CET 2001 Paolo Molaro <lupus@ximian.com>
+
+ * x86/x86-codegen.h: x86_mov_memindex_imm() added.
+
+ svn path=/trunk/mono/; revision=1565
+
+commit 813f9d5a9dcbe48c711bbb8bacc876e976ce0aea
+Author: Radek Doulik <rodo@mono-cvs.ximian.com>
+Date: Thu Nov 29 21:23:53 2001 +0000
+
+ 2001-11-29 Radek Doulik <rodo@ximian.com>
+
+ * ppc/tramp.c: use r12, which is volatile, instead of non-volatile
+ r14 to avoid saving
+
+ svn path=/trunk/mono/; revision=1482
+
+commit 0a65eb2cf0b69f68849e7196b6e00133b3ecf3fc
+Author: Radek Doulik <rodo@mono-cvs.ximian.com>
+Date: Thu Nov 29 20:19:00 2001 +0000
+
+ 2001-11-29 Radek Doulik <rodo@ximian.com>
+
+ * Makefile.am (libmonoarch_la_LIBADD): added ppc to DIST_SUBDIRS
+ and generate libmonoarch for ppc
+
+ svn path=/trunk/mono/; revision=1478
+
+commit c4f49a88d52479062bd8b95669cb90c1b86242d0
+Author: Radek Doulik <rodo@mono-cvs.ximian.com>
+Date: Thu Nov 29 19:32:48 2001 +0000
+
+ added test
+
+ svn path=/trunk/mono/; revision=1477
+
+commit 2c1c4889b99aaf4be0b894ea24b4d92201cb282d
+Author: Radek Doulik <rodo@mono-cvs.ximian.com>
+Date: Thu Nov 29 19:32:19 2001 +0000
+
+ added files for initial ppc support
+
+ svn path=/trunk/mono/; revision=1476
+
+commit 719926a4c59c399767f10b9567859300a768b05a
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Tue Nov 27 10:30:39 2001 +0000
+
+ Tue Nov 27 15:24:07 CET 2001 Paolo Molaro <lupus@ximian.com>
+
+ * x86/x86-codegen.c: x86_lea_memindex() added.
+
+ svn path=/trunk/mono/; revision=1447
+
+commit c4a26e54cfa29ea5279d1964ef4ea7f6176c0357
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Mon Nov 19 06:52:53 2001 +0000
+
+ Mon Nov 19 11:37:14 CET 2001 Paolo Molaro <lupus@ximian.com>
+
+ * class.c, class.h: add mono_install_trampoline() so that the runtime
+ can register a function to create a trampoline: removes the ugly
+ requirement that a runtime needed to export arch_create_jit_trampoline.
+ * object.h, object.c: added mono_install_handler() so that the runtime
+ can install a handler for exceptions generated in C code (with
+ mono_raise_exception()). Added C struct for System.Delegate.
+ * pedump.c: removed arch_create_jit_trampoline.
+ * reflection.c: some cleanups to allow registering user strings and
+ later getting a token for methodrefs and fieldrefs before the assembly
+ is built.
+ * row-indexes.h: updates and fixes from the new ECMA specs.
+
+
+ Mon Nov 19 11:36:22 CET 2001 Paolo Molaro <lupus@ximian.com>
+
+ * jit.c: use mono_install_trampoline (), instead of exporting
+ a function to a lower-level library.
+
+
+ Mon Nov 19 11:33:00 CET 2001 Paolo Molaro <lupus@ximian.com>
+
+ * interp.c: start adding support for handling exceptions across
+ managed/unmanaged boundaries. Cleanup Delegate method invocation.
+ Pass the correct target object in Delegate::Invoke and use the correct
+ 'this' pointer in ldvirtftn (bugs pointed out by Dietmar).
+
+ Mon Nov 19 11:32:28 CET 2001 Paolo Molaro <lupus@ximian.com>
+
+ * main.c: remove arch_create_jit_trampoline().
+
+ svn path=/trunk/mono/; revision=1380
+
+commit af643d34335bfdc90a7455f99847e954456bb07d
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Wed Nov 14 15:18:56 2001 +0000
+
+ Wed Nov 14 19:21:26 CET 2001 Paolo Molaro <lupus@ximian.com>
+
+ * x86/tramp.c: handle boolean as a return value.
+ * x86/x86-codegen.c: x86_widen_memindex() added.
+
+
+ Wed Nov 14 19:23:00 CET 2001 Paolo Molaro <lupus@ximian.com>
+
+ * interp.c: move the stack frame dumping code to a function so it can
+ be called from the debugger. Fix virtual method lookup for interfaces.
+ Throw exceptions instead of aborting in more places.
+ Also print the message in an exception. Updates for field renames in
+ corlib.
+
+
+ Wed Nov 14 19:26:06 CET 2001 Paolo Molaro <lupus@ximian.com>
+
+ * class.h, class.c: add a max_interface_id to MonoClass.
+ * icall.c: rename my_mono_new_object() to my_mono_new_mono_type()
+ since it's used to do that. Added mono_type_type_from_obj().
+ Make GetType() return NULL instead of segfaulting if the type was not
+ found. Handle simple arrays in assQualifiedName.
+ * object.h: add a struct to represent an Exception.
+ * reflection.c: output call convention in method signature.
+ Add code to support P/Invoke methods and fixed offsets for fields.
+
+ svn path=/trunk/mono/; revision=1352
+
+commit 041ab742894fbd6d90e2ffb3c6fddb60a869e952
+Author: Dietmar Maurer <dietmar@mono-cvs.ximian.com>
+Date: Fri Nov 9 13:40:43 2001 +0000
+
+ 2001-11-09 Dietmar Maurer <dietmar@ximian.com>
+
+ * testjit.c (mono_analyze_stack): new BOX impl.
+
+ * x86.brg: implemented INITOBJ
+
+ * testjit.c (mono_analyze_stack): finished array support
+ (mono_analyze_stack): reimplemented DUP instruction
+
+ svn path=/trunk/mono/; revision=1308
+
+commit bff8e602354a8d32dfaed336600b5f648af06e70
+Author: Miguel de Icaza <miguel@gnome.org>
+Date: Thu Nov 8 21:38:32 2001 +0000
+
+ 2001-11-07 Miguel de Icaza <miguel@ximian.com>
+
+ * x86/tramp.c: Include stdlib to kill warning.
+
+ 2001-11-07 Miguel de Icaza <miguel@ximian.com>
+
+ * main.c (dis_property_methods): Added missing colon which avoided
+ setting loc.t
+
+ 2001-11-07 Miguel de Icaza <miguel@ximian.com>
+
+ * interp.c: Include stdlib to kill warning.
+ (check_corlib): Adjust format encodings to remove warnings.
+
+ 2001-11-07 Miguel de Icaza <miguel@ximian.com>
+
+ * reflection.c (build_compressed_metadata): Eliminates warnings
+ and uses 64-bit clean code.
+
+ * metadata.c (mono_type_hash): Change signature to eliminate warnings.
+ (mono_type_equal): Change signature to eliminate warnings.
+
+ 2001-11-07 Miguel de Icaza <miguel@ximian.com>
+
+ * monoburg.y: Include string.h, stdlib.h to kill warnings.
+
+ * sample.brg: Include string.h to remove warnings.
+
+ svn path=/trunk/mono/; revision=1298
+
+commit 306ec85b780f5f9c99ffaf19f51baa6548a298a6
+Author: Dietmar Maurer <dietmar@mono-cvs.ximian.com>
+Date: Wed Nov 7 06:33:48 2001 +0000
+
+ 2001-11-07 Dietmar Maurer <dietmar@ximian.com>
+
+ * emit-x86.c (enter_method): print out all method arguments
+ (x86_magic_trampoline): impl.
+ (arch_create_simple_jit_trampoline): we use different trampolines
+ for static methods (no need to write the address back into the
+ vtable).
+
+ svn path=/trunk/mono/; revision=1278
+
+commit 689da148c801d119d0d2722ef74a497e95c5f1b3
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Mon Oct 22 09:24:31 2001 +0000
+
+ Mon Oct 22 15:20:14 CEST 2001 Paolo Molaro <lupus@ximian.com>
+
+ * x86/tramp.c: handle boolean, u1 and i1 as return values.
+
+ svn path=/trunk/mono/; revision=1192
+
+commit f6b50c3852378ca35cef63056ddec70585b3ac32
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Wed Oct 10 10:11:17 2001 +0000
+
+ Wed Oct 10 16:07:24 CEST 2001 Paolo Molaro <lupus@ximian.com>
+
+ * x86/x86-codegen.c: added x86_set_{reg,mem,membase}.
+
+ svn path=/trunk/mono/; revision=1133
+
+commit 27043fee95be8bec691045d7ab39b1be553550e9
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Mon Oct 8 14:33:48 2001 +0000
+
+ Mon Oct 8 20:27:50 CEST 2001 Paolo Molaro <lupus@ximian.com>
+
+ * configure.in: define NO_UNALIGNED_ACCESS for platforms that
+ can't read on unaligned boundaries
+
+
+ Mon Oct 8 16:12:38 CEST 2001 Paolo Molaro <lupus@ximian.com>
+
+ * metadata.c, metadata.h: use MonoArrayType to describe the shape of an array.
+ Guard against calling bsearch with a NULL pointer (pointed out by Laurent Rioux, smoux).
+ * image.c: endian fixes by Laurent Rioux.
+ * object.h, object.c: rename MonoStringObject to MonoString and
+ MonoArrayObject to MonoArray. Change some function names to conform to
+ the style mono_<object>_<action>. mono_string_new_utf16 () takes a
+ guint16* as first argument, so don't use char*.
+ Provide macros to do the interesting things on arrays in a portable way.
+ * threads-pthread.c: updates for the API changes and #include <sched.h>
+ (required for sched_yield()).
+ * icall.c: updates for the API changes above.
+	* Makefile.am, mono-endian.c, mono-endian.h: include unaligned read routines for
+ platforms that need them.
+
+
+ Mon Oct 8 16:13:55 CEST 2001 Paolo Molaro <lupus@ximian.com>
+
+	* get.c, get.h: MonoArray changed to MonoArrayType.
+ * main.c: guard against calling bsearch with a NULL pointer
+ (pointed out by Laurent Rioux, smoux).
+
+
+ Mon Oct 8 16:13:07 CEST 2001 Paolo Molaro <lupus@ximian.com>
+
+ * x86/tramp.c: remove mono_get_ansi_string () and use
+ mono_string_to_utf8 () instead.
+
+
+ Mon Oct 8 16:14:40 CEST 2001 Paolo Molaro <lupus@ximian.com>
+
+ * interp.c: use the accessors provided in object.h to deal with
+ MonoArrays. Updates for API renames in metadata. Throw exception
+ in ldelema if index is out of bounds.
+
+ svn path=/trunk/mono/; revision=1122
+
+commit 4ff31b89c4d3458dc378cd2e915ed08281a21a8b
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Thu Oct 4 13:32:23 2001 +0000
+
+ Thu Oct 4 19:10:30 CEST 2001 Paolo Molaro <lupus@ximian.com>
+
+ * class.c: MonoTypes stored in MonoClass are stored as
+ fundamental MonoTypes when the class represents a
+ fundamental type (System.Int32, ...).
+	The TypeHandle returned by ldtoken is a MonoType*.
+ * icall.c: ves_icall_get_data_chunk () write out all the
+ PE/COFF stuff. Implement ves_icall_define_method (),
+ ves_icall_set_method_body (), ves_icall_type_from_handle ().
+ * image.c: properly skip unknown streams.
+ * loader.h, loader.c: add type_class to mono_defaults.
+ * metadata.c, metadata.h: export compute_size () as
+ mono_metadata_compute_size () with a better interface.
+ Typo and C&P fixes.
+ * pedump.c: don't try to print the entry point RVA if there is no entry point.
+ * reflection.c, reflection.h: many cleanups, fixes, output method
+ signatures and headers, typedef and typeref info, compress the metadata
+ tables, output all the heap streams, cli header etc.
+ * row-indexes.h: typo fixes.
+
+
+ Thu Oct 4 19:09:13 CEST 2001 Paolo Molaro <lupus@ximian.com>
+
+ * x86/tramp.c: allow marshalling valuetypes if they are
+ 4 bytes long.
+
+
+ Thu Oct 4 19:05:56 CEST 2001 Paolo Molaro <lupus@ximian.com>
+
+ * dis-cil.c: fix printing of exception stuff.
+ * dump.c: display some more info in the typedef table dump.
+ * main.c: typo fix and method list fix.
+
+ svn path=/trunk/mono/; revision=1071
+
+commit 7328e9088acbd2609dff8d07b841c3fafd894d25
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Mon Oct 1 13:07:53 2001 +0000
+
+ Mon Oct 1 18:48:27 CEST 2001 Paolo Molaro <lupus@ximian.com>
+
+ * x86/tramp.c: fix thinko (s/SUB/ADD/) in stack adjustment
+ and avoid a couple of unnecessary instructions.
+
+ svn path=/trunk/mono/; revision=1042
+
+commit 1fa26f9aa718559d3090d1c1275bf04d574368f0
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Fri Sep 28 13:49:47 2001 +0000
+
+ Fri Sep 28 19:26:30 CEST 2001 Paolo Molaro <lupus@ximian.com>
+
+ * metadata.c: fix type comparison for arrays.
+	* loader.h, loader.c: half-assed fix to get more tests working in cygwin.
+ Added a couple of new classes to monodefaults.
+ * icall.c: added a couple of Reflection-related internalcalls.
+ * class.h, class.c: implemented mono_ldtoken () for RuntimeTypeHandles.
+ Added a byval_arg MonoType to MonoClass.
+
+
+ Fri Sep 28 19:43:12 CEST 2001 Paolo Molaro <lupus@ximian.com>
+
+ * x86/tramp.c: marshal valuetypes that are enums.
+
+
+ Fri Sep 28 19:37:46 CEST 2001 Paolo Molaro <lupus@ximian.com>
+
+ * interp.c: Implemented ldtoken, conv.ovf.i. Use MonoClass->byval_arg
+ (and remove related kludges). Don't choke on access to arrays of
+ references. Throw an exception when an internalcall or P/Invoke
+	function doesn't have an implementation. Throw an EngineException
+ for unimplemented opcodes.
+
+ svn path=/trunk/mono/; revision=1027
+
+commit 0122a3ea04b06d1d51f2756e48f6392ccac1096d
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Thu Sep 27 09:38:19 2001 +0000
+
+ Thu Sep 27 15:34:37 CEST 2001 Paolo Molaro <lupus@ximian.com>
+
+ * x86/x86-codegen.h: in memindex operand you can use X86_NOBASEREG
+ as basereg.
+
+ svn path=/trunk/mono/; revision=995
+
+commit a5844f903a68e9448d7031587ffbd02ed2c4f486
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Wed Sep 26 10:33:18 2001 +0000
+
+ Wed Sep 26 16:29:36 CEST 2001 Paolo Molaro <lupus@ximian.com>
+
+ * x86/x86-codegen.h: added memindex addressing mode encoding
+ (and mov to/from register opcodes).
+
+ svn path=/trunk/mono/; revision=984
+
+commit 1f45df6d593cd60780ea121d08ddd035a3418e4a
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Mon Sep 24 13:30:32 2001 +0000
+
+ Mon Sep 24 18:49:01 CEST 2001 Paolo Molaro <lupus@ximian.com>
+
+ * x86/tramp.c: don't change a MONO_TYPE_STRING to a char*
+ when it's an argument to an internalcall.
+
+
+ Mon Sep 24 18:56:59 CEST 2001 Paolo Molaro <lupus@ximian.com>
+
+ * object.c, object.h: added mono_ldstr (), mono_string_is_interned () and
+ mono_string_intern () to implement the semantics of the ldstr opcode
+ and the interning of System.Strings.
+ * icall.c: provide hooks to make String::IsIntern and String::Intern
+ internalcalls.
+
+
+ Mon Sep 24 18:50:25 CEST 2001 Paolo Molaro <lupus@ximian.com>
+
+ * interp.c: catch a few more error conditions with exceptions instead of
+ erroring out.
+ Don't use g_print() in stack traces because it doesn't work with
+ some float values.
+ When we call an instance method of a valuetype class, unbox the 'this'
+ argument if it is an object.
+ Use mono_ldstr () to implement the ldstr opcode: it takes care of
+ interning the string if necessary.
+ Implemented new opcodes: ckfinite, cgt.un, clt.un, ldvirtftn, ldarga.
+ Fixes to handle NaNs when comparing doubles.
+ Make sure the loaded assembly has an entry point defined.
+ Fixed portability bugs in neg and not opcodes.
+
+ svn path=/trunk/mono/; revision=943
+
+commit a995bd527db97e45d979a6b97e0a15a479d2e14b
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Sun Sep 23 07:49:26 2001 +0000
+
+ Sun Sep 23 13:44:57 CEST 2001 Paolo Molaro <lupus@ximian.com>
+
+ * x86/tramp.c: handle MONO_TYPE_CLASS in trampolines.
+
+ svn path=/trunk/mono/; revision=927
+
+commit c9d21b14c718c8e7f3690f5d93ac349bbdd98d88
+Author: Dietmar Maurer <dietmar@mono-cvs.ximian.com>
+Date: Fri Sep 21 12:50:46 2001 +0000
+
+ implemented more opcodes
+
+ svn path=/trunk/mono/; revision=916
+
+commit a0930b7dcd7fe845e1c3c06f3fba6736f88d8bf9
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Thu Sep 20 15:31:50 2001 +0000
+
+ Thu Sep 20 16:32:42 CEST 2001 Paolo Molaro <lupus@ximian.com>
+
+ * interp.c: implemented some more opcodes: calli, rem.un,
+ shr.un, conv.u, cpobj, stobj, conv.r.un, conv.ovf.i1.un,
+ conv.ovf.i2.un, conv.ovf.i4.un, conv.ovf.i8.un, conv.ovf.i.un,
+ conv.ovf.u1.un, conv.ovf.u2.un, conv.ovf.u4.un, conv.ovf.u8.un,
+ conv.ovf.u.un.
+ Fix some 64 bit issues in the array element access code and a small bug.
+ Throw an exception on index out of range instead of asserting.
+ Throw an exception on a NULL array instead of dying.
+ Stomped a memory corruption bug (.cctor methods were freed after
+	executing them, but they are stored in MonoClass now...).
+ Added a simple facility to invoke the debugger when a named
+ function is entered (use the cmdline option --debug method_name).
+ * interp.h: fix 64 bit issue.
+
+ svn path=/trunk/mono/; revision=904
+
+commit e177e60b93378860f0573f458d06cd641770a255
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Tue Sep 18 07:26:43 2001 +0000
+
+ Tue Sep 18 13:23:59 CEST 2001 Paolo Molaro <lupus@ximian.com>
+
+ * x86/x86-codegen.h: remove C++ comments.
+
+ svn path=/trunk/mono/; revision=865
+
+commit 4f874ee6ae2442c99421087b5ad11eae88283d55
+Author: Dietmar Maurer <dietmar@mono-cvs.ximian.com>
+Date: Mon Sep 17 09:10:44 2001 +0000
+
+ 2001-09-17 Dietmar Maurer <dietmar@ximian.com>
+
+ * x86.brg: emit real code for calls
+
+ * testjit.c (create_jit_trampoline): creates a function to trigger jit
+ compilation.
+ (mono_compile_method): reversed argument order
+
+ svn path=/trunk/mono/; revision=842
+
+commit 011e42b68518f5c1397ecdc0417c021b4c524560
+Author: Dietmar Maurer <dietmar@mono-cvs.ximian.com>
+Date: Mon Sep 17 07:18:11 2001 +0000
+
+ 2001-09-17 Dietmar Maurer <dietmar@ximian.com>
+
+ * x86/x86-codegen.h (x86_alu_reg_reg): replaced src/dest
+
+ svn path=/trunk/mono/; revision=841
+
+commit c61474703f058c226a94ba9cdfb1d19e3a45eecd
+Author: Dietmar Maurer <dietmar@mono-cvs.ximian.com>
+Date: Wed Sep 12 03:47:43 2001 +0000
+
+ *** empty log message ***
+
+ svn path=/trunk/mono/; revision=792
+
+commit db78bf2c09f07356fe4c8284d1a48fa9867bd2fc
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Mon Sep 10 14:26:02 2001 +0000
+
+ Mon Sep 10 20:19:00 CEST 2001 Paolo Molaro <lupus@ximian.com>
+
+ * configure.in: check for sizeof(void*) and for the architecture.
+
+ Mon Sep 10 17:26:06 CEST 2001 Paolo Molaro <lupus@ximian.com>
+
+ * Makefile.am, x86/Makefile.am: conditional compile logic
+ to make porting to different targets easier.
+
+ Mon Sep 10 17:24:45 CEST 2001 Paolo Molaro <lupus@ximian.com>
+
+ * Makefile.am: make it work for make distcheck.
+
+ Mon Sep 10 20:21:34 CEST 2001 Paolo Molaro <lupus@ximian.com>
+
+ * endian.h, assembly.c: fix some endianness issues.
+
+ Mon Sep 10 20:20:36 CEST 2001 Paolo Molaro <lupus@ximian.com>
+
+ * interp.c: endian fixes, comments.
+
+ svn path=/trunk/mono/; revision=783
+
+commit ce34fcec9c53a31ba2cd48f22c9a5099d02779e5
+Author: Dietmar Maurer <dietmar@mono-cvs.ximian.com>
+Date: Mon Sep 10 09:34:11 2001 +0000
+
+ *** empty log message ***
+
+ svn path=/trunk/mono/; revision=781
+
+commit 6c07667b555ca78bdad5d7b6e5aa87f8078c1989
+Author: Dietmar Maurer <dietmar@mono-cvs.ximian.com>
+Date: Mon Sep 10 09:14:46 2001 +0000
+
+ added the jit prototype, small fixes
+
+ svn path=/trunk/mono/; revision=780
+
+commit 680963c46ae8b96cca52387e0f5b1a2e39825b90
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Fri Sep 7 12:53:34 2001 +0000
+
+ Fri Sep 7 18:43:06 CEST 2001 Paolo Molaro <lupus@ximian.com>
+
+ * x86/x86-codegen.h: fixes and x86_mov_membase_imm ().
+ * x86/tramp.c: implemented mono_create_method_pointer ():
+ creates a native pointer to a method implementation that can be
+ used as a normal C callback.
+
+
+ Fri Sep 7 18:45:38 CEST 2001 Paolo Molaro <lupus@ximian.com>
+
+ * interp.c, interp.h: make ves_exec_method () and stackval_from_data ()
+ non static. Implement a couple of runtime methods needed to
+ use delegates (ves_runtime_method ()).
+ Implemented ldftn opcode.
+
+ svn path=/trunk/mono/; revision=745
+
+commit 4c39a186f2fa0dc3cca3ae6f6dc6584c75341adf
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Thu Sep 6 09:46:03 2001 +0000
+
+ Thu Sep 6 15:38:00 CEST 2001 Paolo Molaro <lupus@ximian.com>
+
+ * x86/x86-codegen.h: added x86_rdtsc() and fixes.
+ * x86/tramp.c: create trampolines to call pinvoke methods.
+ * x86/Makefile.am: create a libmonoarch convenience library.
+
+
+ Thu Sep 6 15:41:24 CEST 2001 Paolo Molaro <lupus@ximian.com>
+
+ * Makefile.am: link to libmonoarch.
+ * interp.h, interp.c: use mono_create_trampoline ().
+ Pass the command line arguments to Main (String[]) methods.
+
+ svn path=/trunk/mono/; revision=728
+
+commit d3a5cf739f1182a42d20f1d5ace2a272307da87f
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Mon Aug 27 03:43:09 2001 +0000
+
+ Mon Aug 27 09:29:00 CEST 2001 Paolo Molaro <lupus@ximian.com>
+
+ * x86/x86-codegen.h: fix x86_call_code (). x86_mov_regp_reg () added.
+
+ svn path=/trunk/mono/; revision=636
+
+commit 231c25bd596aa45a2962a9c820fc9417985a1f3f
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Sat Aug 18 06:55:29 2001 +0000
+
+ Sat Aug 18 12:40:32 CEST 2001 Paolo Molaro <lupus@ximian.com>
+
+ * x86/x86-codegen.h: fix a couple of buglets and add x86_regp_emit().
+
+ Sat Aug 18 12:42:26 CEST 2001 Paolo Molaro <lupus@ximian.com>
+
+ * class.c, class.h: load also the methods when loading a class.
+
+ Sat Aug 18 12:43:38 CEST 2001 Paolo Molaro <lupus@ximian.com>
+
+ * interp.c, interp.h: added support code to create exceptions.
+	Changed the internal calling convention over to MonoInvocation, to support
+	exceptions, walking the stack back and forward and passing the 'this'
+	pointer separately (removing the kludges previously required to pass it on
+	the stack). Use alloca heavily for both local vars and a copy of the incoming
+ arguments. Init local vars to zero.
+ Simplify stackval_from_data() and stackval_to_data() to only take a pointer
+ instead of pointer + offset.
+ Implement a few exceptions-related opcodes and the code to run finally, fault and
+ catch blocks as well as a stack trace if no handler is found.
+
+ Sat Aug 18 12:51:28 CEST 2001 Paolo Molaro <lupus@ximian.com>
+
+ * metadata.c, metadata.h: in the signature and method header store
+	only the space required for holding the local vars and incoming arguments.
+
+ svn path=/trunk/mono/; revision=493
+
+commit 75cdbf5cd16480631ac8579c2c2f230761e4802b
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Wed Aug 8 17:21:29 2001 +0000
+
+ Fixed x86_mov_reg_imm().
+
+ svn path=/trunk/mono/; revision=441
+
+commit 5263eb4d219b8054b29a4d250cec40a7c8170a84
+Author: Miguel de Icaza <miguel@gnome.org>
+Date: Wed Aug 8 16:48:32 2001 +0000
+
+ Update copyright
+
+ svn path=/trunk/mono/; revision=440
+
+commit c9397770c008d427da0b7ad058782fc8564c10d3
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Wed Aug 8 13:32:23 2001 +0000
+
+ Wed Aug 8 15:30:05 CEST 2001 Paolo Molaro <lupus@ximian.com>
+
+ * x86/x86-codegen.h, x86/test.c: added x86 code emitter with
+ test.
+
+ svn path=/trunk/mono/; revision=435
diff --git a/lib/ffts/src/arch/LICENSE b/lib/ffts/src/arch/LICENSE
new file mode 100644
index 0000000..cb4a84d
--- /dev/null
+++ b/lib/ffts/src/arch/LICENSE
@@ -0,0 +1,21 @@
+Copyright (c) 2001, 2002, 2003 Ximian, Inc and the individuals listed
+on the ChangeLog entries.
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/lib/ffts/src/arch/Makefile.am b/lib/ffts/src/arch/Makefile.am
new file mode 100644
index 0000000..8741687
--- /dev/null
+++ b/lib/ffts/src/arch/Makefile.am
@@ -0,0 +1,11 @@
+DIST_SUBDIRS = x86 ppc sparc arm arm64 s390x amd64 ia64 mips
+
+AM_CPPFLAGS = $(GLIB_CFLAGS) -I$(top_srcdir)
+
+if ARM
+# arm needs to build some stuff even in JIT mode
+SUBDIRS = $(arch_target)
+endif
+
+EXTRA_DIST = ChangeLog
+
diff --git a/lib/ffts/src/arch/README b/lib/ffts/src/arch/README
new file mode 100644
index 0000000..cfed57d
--- /dev/null
+++ b/lib/ffts/src/arch/README
@@ -0,0 +1,7 @@
+mono_arch
+=========
+
+Part of Mono project, https://github.com/mono
+
+These are C macros that are useful when generating native code on various platforms.
+This code is MIT X11 licensed.
diff --git a/lib/ffts/src/arch/arm/.gitattributes b/lib/ffts/src/arch/arm/.gitattributes
new file mode 100644
index 0000000..4819db1
--- /dev/null
+++ b/lib/ffts/src/arch/arm/.gitattributes
@@ -0,0 +1 @@
+/arm-wmmx.h -crlf
diff --git a/lib/ffts/src/arch/arm/.gitignore b/lib/ffts/src/arch/arm/.gitignore
new file mode 100644
index 0000000..978145d
--- /dev/null
+++ b/lib/ffts/src/arch/arm/.gitignore
@@ -0,0 +1,15 @@
+/Makefile
+/Makefile.in
+/.deps
+/.libs
+/*.o
+/*.la
+/*.lo
+/*.lib
+/*.obj
+/*.exe
+/*.dll
+/arm_dpimacros.h
+/arm_fpamacros.h
+/arm_vfpmacros.h
+/fixeol.sh
diff --git a/lib/ffts/src/arch/arm/Makefile.am b/lib/ffts/src/arch/arm/Makefile.am
new file mode 100644
index 0000000..593574c
--- /dev/null
+++ b/lib/ffts/src/arch/arm/Makefile.am
@@ -0,0 +1,27 @@
+
+AM_CPPFLAGS = $(GLIB_CFLAGS) -I$(top_srcdir)
+
+noinst_LTLIBRARIES = libmonoarch-arm.la
+
+BUILT_SOURCES = arm_dpimacros.h arm_vfpmacros.h
+
+
+libmonoarch_arm_la_SOURCES = $(BUILT_SOURCES) \
+ arm-codegen.c \
+ arm-codegen.h \
+ arm-dis.c \
+ arm-dis.h
+
+arm_dpimacros.h: dpiops.sh mov_macros.th dpi_macros.th cmp_macros.th
+ (cd $(srcdir); bash ./dpiops.sh) > $@t
+ mv $@t $@
+
+arm_vfpmacros.h: vfpops.sh vfpm_macros.th vfp_macros.th
+ (cd $(srcdir); bash ./vfpops.sh) > $@t
+ mv $@t $@
+
+CLEANFILES = $(BUILT_SOURCES)
+
+EXTRA_DIST = dpiops.sh mov_macros.th dpi_macros.th cmp_macros.th \
+ vfpm_macros.th vfp_macros.th arm-vfp-codegen.h vfpops.sh
+
diff --git a/lib/ffts/src/arch/arm/arm-codegen.c b/lib/ffts/src/arch/arm/arm-codegen.c
new file mode 100644
index 0000000..9914ace
--- /dev/null
+++ b/lib/ffts/src/arch/arm/arm-codegen.c
@@ -0,0 +1,193 @@
+/*
+ * arm-codegen.c
+ * Copyright (c) 2002 Sergey Chaban <serge@wildwestsoftware.com>
+ */
+
+#include "arm-codegen.h"
+
+
+arminstr_t* arm_emit_std_prologue(arminstr_t* p, unsigned int local_size) {
+ ARM_MOV_REG_REG(p, ARMREG_IP, ARMREG_SP);
+
+ /* save args */
+ ARM_PUSH(p, (1 << ARMREG_A1)
+ | (1 << ARMREG_A2)
+ | (1 << ARMREG_A3)
+ | (1 << ARMREG_A4));
+
+ ARM_PUSH(p, (1U << ARMREG_IP) | (1U << ARMREG_LR));
+
+ if (local_size != 0) {
+ if ((local_size & (~0xFF)) == 0) {
+ ARM_SUB_REG_IMM8(p, ARMREG_SP, ARMREG_SP, local_size);
+ } else {
+ /* TODO: optimize */
+ p = arm_mov_reg_imm32(p, ARMREG_IP, local_size);
+ ARM_SUB_REG_REG(p, ARMREG_SP, ARMREG_SP, ARMREG_IP);
+ ARM_ADD_REG_IMM8(p, ARMREG_IP, ARMREG_IP, sizeof(armword_t));
+ ARM_LDR_REG_REG(p, ARMREG_IP, ARMREG_SP, ARMREG_IP);
+ }
+ }
+
+ return p;
+}
+
+arminstr_t* arm_emit_std_epilogue(arminstr_t* p, unsigned int local_size, int pop_regs) {
+ if (local_size != 0) {
+ if ((local_size & (~0xFF)) == 0) {
+ ARM_ADD_REG_IMM8(p, ARMREG_SP, ARMREG_SP, local_size);
+ } else {
+ /* TODO: optimize */
+ p = arm_mov_reg_imm32(p, ARMREG_IP, local_size);
+ ARM_ADD_REG_REG(p, ARMREG_SP, ARMREG_SP, ARMREG_IP);
+ }
+ }
+
+ ARM_POP_NWB(p, (1 << ARMREG_SP) | (1 << ARMREG_PC) | (pop_regs & 0x3FF));
+
+ return p;
+}
+
+
+/* do not push A1-A4 */
+arminstr_t* arm_emit_lean_prologue(arminstr_t* p, unsigned int local_size, int push_regs) {
+ ARM_MOV_REG_REG(p, ARMREG_IP, ARMREG_SP);
+	/* push_regs up to R10 will be saved */
+ ARM_PUSH(p, (1U << ARMREG_IP) | (1U << ARMREG_LR) | (push_regs & 0x3FF));
+
+ if (local_size != 0) {
+ if ((local_size & (~0xFF)) == 0) {
+ ARM_SUB_REG_IMM8(p, ARMREG_SP, ARMREG_SP, local_size);
+ } else {
+ /* TODO: optimize */
+ p = arm_mov_reg_imm32(p, ARMREG_IP, local_size);
+ ARM_SUB_REG_REG(p, ARMREG_SP, ARMREG_SP, ARMREG_IP);
+ /* restore IP from stack */
+ ARM_ADD_REG_IMM8(p, ARMREG_IP, ARMREG_IP, sizeof(armword_t));
+ ARM_LDR_REG_REG(p, ARMREG_IP, ARMREG_SP, ARMREG_IP);
+ }
+ }
+
+ return p;
+}
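+
+/*
+ * Editorial sketch (not part of the original patch): how these emitters
+ * are typically combined to JIT a tiny function. The mmap-based buffer,
+ * the helper name and the omitted error handling are assumptions for a
+ * POSIX/ARM host; a real caller must also flush the instruction cache
+ * before executing the buffer.
+ */
+#if 0 /* illustration only */
+#include <sys/mman.h>
+
+typedef int (*answer_fn)(void);
+
+static answer_fn emit_answer(void) {
+	arminstr_t* buf = mmap(NULL, 4096, PROT_READ | PROT_WRITE | PROT_EXEC,
+	                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+	arminstr_t* p = buf;
+
+	p = arm_emit_lean_prologue(p, 0, 0);      /* no locals, no saved regs */
+	p = arm_mov_reg_imm32(p, ARMREG_R0, 42);  /* return value goes in R0 */
+	p = arm_emit_std_epilogue(p, 0, 0);       /* restores SP, returns via PC */
+	return (answer_fn)buf;
+}
+#endif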
+
+/* Bit scan forward: returns the 1-based index of the lowest set bit
+ * (arm_bsf(1) == 1, arm_bsf(8) == 4), or 0 when val is 0. */
+int arm_bsf(armword_t val) {
+ int i;
+ armword_t mask;
+
+ if (val == 0) return 0;
+ for (i=1, mask=1; (i <= 8 * sizeof(armword_t)) && ((val & mask) == 0); ++i, mask<<=1);
+
+ return i;
+}
+
+
+int arm_is_power_of_2(armword_t val) {
+ return ((val & (val-1)) == 0);
+}
+
+
+/*
+ * returns:
+ * 1 - unable to represent
+ * positive even number - MOV-representable
+ * negative even number - MVN-representable
+ */
+int calc_arm_mov_const_shift(armword_t val) {
+ armword_t mask;
+ int res = 1, shift;
+
+ for (shift=0; shift < 32; shift+=2) {
+ mask = ARM_SCALE(0xFF, shift);
+ if ((val & (~mask)) == 0) {
+ res = shift;
+ break;
+ }
+ if (((~val) & (~mask)) == 0) {
+ res = -shift - 2;
+ break;
+ }
+ }
+
+ return res;
+}
+
+
+int is_arm_const(armword_t val) {
+ int res;
+ res = arm_is_power_of_2(val);
+ if (!res) {
+ res = calc_arm_mov_const_shift(val);
+ res = !(res < 0 || res == 1);
+ }
+ return res;
+}
+
+
+int arm_const_steps(armword_t val) {
+ int shift, steps = 0;
+
+ while (val != 0) {
+ shift = (arm_bsf(val) - 1) & (~1);
+ val &= ~(0xFF << shift);
+ ++steps;
+ }
+ return steps;
+}
+
+
+/*
+ * ARM cannot load arbitrary 32-bit constants directly into registers;
+ * a widely used work-around is to store constants in a PC-addressable
+ * pool and load them with a PC-relative LDR; the easiest way to do that
+ * is to embed the constant inside a function, with an unconditional
+ * branch around it. That method is not used here at the moment.
+ * This routine always emits a sequence of instructions to generate the
+ * requested constant: in the worst case it takes 4 instructions to
+ * synthesize a constant - 1 MOV and 3 subsequent ORRs.
+ */
+arminstr_t* arm_mov_reg_imm32_cond(arminstr_t* p, int reg, armword_t imm32, int cond) {
+ int mov_op;
+ int step_op;
+ int snip;
+ int shift = calc_arm_mov_const_shift(imm32);
+
+ if ((shift & 0x80000001) != 1) {
+ if (shift >= 0) {
+ ARM_MOV_REG_IMM_COND(p, reg, imm32 >> ((32 - shift) & 31), shift, cond);
+ } else {
+ ARM_MVN_REG_IMM_COND(p, reg, (imm32 ^ (~0)) >> ((32 + 2 + shift) & 31), (-shift - 2), cond);
+ }
+ } else {
+ mov_op = ARMOP_MOV;
+ step_op = ARMOP_ORR;
+
+ if (arm_const_steps(imm32) > arm_const_steps(~imm32)) {
+ mov_op = ARMOP_MVN;
+ step_op = ARMOP_SUB;
+ imm32 = ~imm32;
+ }
+
+ shift = (arm_bsf(imm32) - 1) & (~1);
+ snip = imm32 & (0xFF << shift);
+ ARM_EMIT(p, ARM_DEF_DPI_IMM_COND((unsigned)snip >> shift, (32 - shift) >> 1, reg, 0, 0, mov_op, cond));
+
+ while ((imm32 ^= snip) != 0) {
+ shift = (arm_bsf(imm32) - 1) & (~1);
+ snip = imm32 & (0xFF << shift);
+ ARM_EMIT(p, ARM_DEF_DPI_IMM_COND((unsigned)snip >> shift, (32 - shift) >> 1, reg, reg, 0, step_op, cond));
+ }
+ }
+
+ return p;
+}
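+
+/*
+ * Worked example (editorial): for imm32 = 0x55555555 the loop above
+ * peels one rotated byte at a time from the low end, giving the
+ * worst-case four-instruction sequence
+ *
+ *     mov rd, #0x55
+ *     orr rd, rd, #0x5500
+ *     orr rd, rd, #0x550000
+ *     orr rd, rd, #0x55000000
+ *
+ * while a value such as 0xFFFFFF00 is emitted as a single MVN.
+ */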
+
+
+arminstr_t* arm_mov_reg_imm32(arminstr_t* p, int reg, armword_t imm32) {
+ return arm_mov_reg_imm32_cond(p, reg, imm32, ARMCOND_AL);
+}
+
+
+
diff --git a/lib/ffts/src/arch/arm/arm-codegen.h b/lib/ffts/src/arch/arm/arm-codegen.h
new file mode 100644
index 0000000..d4d7f7c
--- /dev/null
+++ b/lib/ffts/src/arch/arm/arm-codegen.h
@@ -0,0 +1,1127 @@
+/*
+ * arm-codegen.h
+ * Copyright (c) 2002-2003 Sergey Chaban <serge@wildwestsoftware.com>
+ * Copyright 2005-2011 Novell Inc
+ * Copyright 2011 Xamarin Inc
+ */
+
+
+#ifndef ARM_H
+#define ARM_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef unsigned int arminstr_t;
+typedef unsigned int armword_t;
+
+/* Helper functions */
+arminstr_t* arm_emit_std_prologue(arminstr_t* p, unsigned int local_size);
+arminstr_t* arm_emit_std_epilogue(arminstr_t* p, unsigned int local_size, int pop_regs);
+arminstr_t* arm_emit_lean_prologue(arminstr_t* p, unsigned int local_size, int push_regs);
+int arm_is_power_of_2(armword_t val);
+int calc_arm_mov_const_shift(armword_t val);
+int is_arm_const(armword_t val);
+int arm_bsf(armword_t val);
+arminstr_t* arm_mov_reg_imm32_cond(arminstr_t* p, int reg, armword_t imm32, int cond);
+arminstr_t* arm_mov_reg_imm32(arminstr_t* p, int reg, armword_t imm32);
+
+
+
+#if defined(_MSC_VER) || defined(__CC_NORCROFT)
+ void __inline _arm_emit(arminstr_t** p, arminstr_t i) {**p = i; (*p)++;}
+# define ARM_EMIT(p, i) _arm_emit((arminstr_t**)&p, (arminstr_t)(i))
+#else
+# define ARM_EMIT(p, i) do { arminstr_t *__ainstrp = (void*)(p); *__ainstrp = (arminstr_t)(i); (p) = (void*)(__ainstrp+1);} while (0)
+#endif
+
+#if defined(_MSC_VER) && !defined(ARM_NOIASM)
+# define ARM_IASM(_expr) __emit (_expr)
+#else
+# define ARM_IASM(_expr)
+#endif
+
+/* even_scale = rot << 1 */
+#define ARM_SCALE(imm8, even_scale) ( ((imm8) >> (even_scale)) | ((imm8) << (32 - (even_scale))) )
+
+
+
+typedef enum {
+ ARMREG_R0 = 0,
+ ARMREG_R1,
+ ARMREG_R2,
+ ARMREG_R3,
+ ARMREG_R4,
+ ARMREG_R5,
+ ARMREG_R6,
+ ARMREG_R7,
+ ARMREG_R8,
+ ARMREG_R9,
+ ARMREG_R10,
+ ARMREG_R11,
+ ARMREG_R12,
+ ARMREG_R13,
+ ARMREG_R14,
+ ARMREG_R15,
+
+
+ /* aliases */
+ /* args */
+ ARMREG_A1 = ARMREG_R0,
+ ARMREG_A2 = ARMREG_R1,
+ ARMREG_A3 = ARMREG_R2,
+ ARMREG_A4 = ARMREG_R3,
+
+ /* local vars */
+ ARMREG_V1 = ARMREG_R4,
+ ARMREG_V2 = ARMREG_R5,
+ ARMREG_V3 = ARMREG_R6,
+ ARMREG_V4 = ARMREG_R7,
+ ARMREG_V5 = ARMREG_R8,
+ ARMREG_V6 = ARMREG_R9,
+ ARMREG_V7 = ARMREG_R10,
+
+ ARMREG_FP = ARMREG_R11,
+ ARMREG_IP = ARMREG_R12,
+ ARMREG_SP = ARMREG_R13,
+ ARMREG_LR = ARMREG_R14,
+ ARMREG_PC = ARMREG_R15,
+
+ /* co-processor */
+ ARMREG_CR0 = 0,
+ ARMREG_CR1,
+ ARMREG_CR2,
+ ARMREG_CR3,
+ ARMREG_CR4,
+ ARMREG_CR5,
+ ARMREG_CR6,
+ ARMREG_CR7,
+ ARMREG_CR8,
+ ARMREG_CR9,
+ ARMREG_CR10,
+ ARMREG_CR11,
+ ARMREG_CR12,
+ ARMREG_CR13,
+ ARMREG_CR14,
+ ARMREG_CR15,
+
+ /* XScale: acc0 on CP0 */
+ ARMREG_ACC0 = ARMREG_CR0,
+
+ ARMREG_MAX = ARMREG_R15
+} ARMReg;
+
+/* number of argument registers */
+#define ARM_NUM_ARG_REGS 4
+
+/* bitvector for all argument regs (A1-A4) */
+#define ARM_ALL_ARG_REGS \
+ (1 << ARMREG_A1) | (1 << ARMREG_A2) | (1 << ARMREG_A3) | (1 << ARMREG_A4)
+
+
+typedef enum {
+ ARMCOND_EQ = 0x0, /* Equal; Z = 1 */
+ ARMCOND_NE = 0x1, /* Not equal, or unordered; Z = 0 */
+ ARMCOND_CS = 0x2, /* Carry set; C = 1 */
+ ARMCOND_HS = ARMCOND_CS, /* Unsigned higher or same; */
+ ARMCOND_CC = 0x3, /* Carry clear; C = 0 */
+ ARMCOND_LO = ARMCOND_CC, /* Unsigned lower */
+ ARMCOND_MI = 0x4, /* Negative; N = 1 */
+ ARMCOND_PL = 0x5, /* Positive or zero; N = 0 */
+ ARMCOND_VS = 0x6, /* Overflow; V = 1 */
+ ARMCOND_VC = 0x7, /* No overflow; V = 0 */
+ ARMCOND_HI = 0x8, /* Unsigned higher; C = 1 && Z = 0 */
+ ARMCOND_LS = 0x9, /* Unsigned lower or same; C = 0 || Z = 1 */
+ ARMCOND_GE = 0xA, /* Signed greater than or equal; N = V */
+ ARMCOND_LT = 0xB, /* Signed less than; N != V */
+ ARMCOND_GT = 0xC, /* Signed greater than; Z = 0 && N = V */
+ ARMCOND_LE = 0xD, /* Signed less than or equal; Z = 1 && N != V */
+ ARMCOND_AL = 0xE, /* Always */
+ ARMCOND_NV = 0xF, /* Never */
+
+ ARMCOND_SHIFT = 28
+} ARMCond;
+
+#define ARMCOND_MASK (ARMCOND_NV << ARMCOND_SHIFT)
+
+#define ARM_DEF_COND(cond) (((cond) & 0xF) << ARMCOND_SHIFT)
+
+
+
+typedef enum {
+ ARMSHIFT_LSL = 0,
+ ARMSHIFT_LSR = 1,
+ ARMSHIFT_ASR = 2,
+ ARMSHIFT_ROR = 3,
+
+ ARMSHIFT_ASL = ARMSHIFT_LSL
+ /* rrx = (ror, 1) */
+} ARMShiftType;
+
+
+typedef struct {
+ armword_t PSR_c : 8;
+ armword_t PSR_x : 8;
+ armword_t PSR_s : 8;
+ armword_t PSR_f : 8;
+} ARMPSR;
+
+typedef enum {
+ ARMOP_AND = 0x0,
+ ARMOP_EOR = 0x1,
+ ARMOP_SUB = 0x2,
+ ARMOP_RSB = 0x3,
+ ARMOP_ADD = 0x4,
+ ARMOP_ADC = 0x5,
+ ARMOP_SBC = 0x6,
+ ARMOP_RSC = 0x7,
+ ARMOP_TST = 0x8,
+ ARMOP_TEQ = 0x9,
+ ARMOP_CMP = 0xa,
+ ARMOP_CMN = 0xb,
+ ARMOP_ORR = 0xc,
+ ARMOP_MOV = 0xd,
+ ARMOP_BIC = 0xe,
+ ARMOP_MVN = 0xf,
+
+
+ /* not really opcodes */
+
+ ARMOP_STR = 0x0,
+ ARMOP_LDR = 0x1,
+
+ /* ARM2+ */
+ ARMOP_MUL = 0x0, /* Rd := Rm*Rs */
+ ARMOP_MLA = 0x1, /* Rd := (Rm*Rs)+Rn */
+
+ /* ARM3M+ */
+ ARMOP_UMULL = 0x4,
+ ARMOP_UMLAL = 0x5,
+ ARMOP_SMULL = 0x6,
+ ARMOP_SMLAL = 0x7,
+
+ /* for data transfers with register offset */
+ ARM_UP = 1,
+ ARM_DOWN = 0
+} ARMOpcode;
+
+typedef enum {
+ THUMBOP_AND = 0,
+ THUMBOP_EOR = 1,
+ THUMBOP_LSL = 2,
+ THUMBOP_LSR = 3,
+ THUMBOP_ASR = 4,
+ THUMBOP_ADC = 5,
+ THUMBOP_SBC = 6,
+ THUMBOP_ROR = 7,
+ THUMBOP_TST = 8,
+ THUMBOP_NEG = 9,
+ THUMBOP_CMP = 10,
+ THUMBOP_CMN = 11,
+ THUMBOP_ORR = 12,
+ THUMBOP_MUL = 13,
+ THUMBOP_BIC = 14,
+ THUMBOP_MVN = 15,
+ THUMBOP_MOV = 16,
+ THUMBOP_CMPI = 17,
+ THUMBOP_ADD = 18,
+ THUMBOP_SUB = 19,
+ THUMBOP_CMPH = 19,
+ THUMBOP_MOVH = 20
+} ThumbOpcode;
+
+
+/* Generic form - all ARM instructions are conditional. */
+typedef struct {
+ arminstr_t icode : 28;
+ arminstr_t cond : 4;
+} ARMInstrGeneric;
+
+
+
+/* Branch or Branch with Link instructions. */
+typedef struct {
+ arminstr_t offset : 24;
+ arminstr_t link : 1;
+ arminstr_t tag : 3; /* 1 0 1 */
+ arminstr_t cond : 4;
+} ARMInstrBR;
+
+#define ARM_BR_ID 5
+#define ARM_BR_MASK 7 << 25
+#define ARM_BR_TAG ARM_BR_ID << 25
+
+#define ARM_DEF_BR(offs, l, cond) ((offs) | ((l) << 24) | (ARM_BR_TAG) | (cond << ARMCOND_SHIFT))
+
+/* branch */
+#define ARM_B_COND(p, cond, offset) ARM_EMIT(p, ARM_DEF_BR(offset, 0, cond))
+#define ARM_B(p, offs) ARM_B_COND((p), ARMCOND_AL, (offs))
+/* branch with link */
+#define ARM_BL_COND(p, cond, offset) ARM_EMIT(p, ARM_DEF_BR(offset, 1, cond))
+#define ARM_BL(p, offs) ARM_BL_COND((p), ARMCOND_AL, (offs))
+
+#define ARM_DEF_BX(reg,sub,cond) (0x12fff << 8 | (reg) | ((sub) << 4) | ((cond) << ARMCOND_SHIFT))
+
+#define ARM_BX_COND(p, cond, reg) ARM_EMIT(p, ARM_DEF_BX(reg, 1, cond))
+#define ARM_BX(p, reg) ARM_BX_COND((p), ARMCOND_AL, (reg))
+
+#define ARM_BLX_REG_COND(p, cond, reg) ARM_EMIT(p, ARM_DEF_BX(reg, 3, cond))
+#define ARM_BLX_REG(p, reg) ARM_BLX_REG_COND((p), ARMCOND_AL, (reg))
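+
+/*
+ * Editorial note: 'offset'/'offs' above is the raw 24-bit field exactly
+ * as encoded in the instruction - a signed word count relative to PC+8 -
+ * not a byte distance. A hedged helper (assumed, not part of this API):
+ */
+#if 0 /* illustration only */
+static arminstr_t* emit_b_to(arminstr_t* p, arminstr_t* target) {
+	int offs = (int)(target - (p + 2)); /* in words; PC reads as p + 8 bytes */
+	ARM_B(p, offs & 0xFFFFFF);          /* ARM_EMIT advances p */
+	return p;
+}
+#endif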
+
+/* Data Processing Instructions - there are 3 types. */
+
+typedef struct {
+ arminstr_t imm : 8;
+ arminstr_t rot : 4;
+} ARMDPI_op2_imm;
+
+typedef struct {
+ arminstr_t rm : 4;
+ arminstr_t tag : 1; /* 0 - immediate shift, 1 - reg shift */
+ arminstr_t type : 2; /* shift type - logical, arithmetic, rotate */
+} ARMDPI_op2_reg_shift;
+
+
+/* op2 is reg shift by imm */
+typedef union {
+ ARMDPI_op2_reg_shift r2;
+ struct {
+ arminstr_t _dummy_r2 : 7;
+ arminstr_t shift : 5;
+ } imm;
+} ARMDPI_op2_reg_imm;
+
+/* op2 is reg shift by reg */
+typedef union {
+ ARMDPI_op2_reg_shift r2;
+ struct {
+ arminstr_t _dummy_r2 : 7;
+ arminstr_t pad : 1; /* always 0, to differentiate from HXFER etc. */
+ arminstr_t rs : 4;
+ } reg;
+} ARMDPI_op2_reg_reg;
+
+/* Data processing instrs */
+typedef union {
+ ARMDPI_op2_imm op2_imm;
+
+ ARMDPI_op2_reg_shift op2_reg;
+ ARMDPI_op2_reg_imm op2_reg_imm;
+ ARMDPI_op2_reg_reg op2_reg_reg;
+
+ struct {
+ arminstr_t op2 : 12; /* raw operand 2 */
+ arminstr_t rd : 4; /* destination reg */
+ arminstr_t rn : 4; /* first operand reg */
+ arminstr_t s : 1; /* S-bit controls PSR update */
+ arminstr_t opcode : 4; /* arithmetic/logic operation */
+ arminstr_t type : 1; /* type of op2, 0 = register, 1 = immediate */
+ arminstr_t tag : 2; /* 0 0 */
+ arminstr_t cond : 4;
+ } all;
+} ARMInstrDPI;
+
+#define ARM_DPI_ID 0
+#define ARM_DPI_MASK 3 << 26
+#define ARM_DPI_TAG ARM_DPI_ID << 26
+
+#define ARM_DEF_DPI_IMM_COND(imm8, rot, rd, rn, s, op, cond) \
+ ((imm8) & 0xFF) | \
+ (((rot) & 0xF) << 8) | \
+ ((rd) << 12) | \
+ ((rn) << 16) | \
+ ((s) << 20) | \
+ ((op) << 21) | \
+ (1 << 25) | \
+ (ARM_DPI_TAG) | \
+ ARM_DEF_COND(cond)
+
+
+#define ARM_DEF_DPI_IMM(imm8, rot, rd, rn, s, op) \
+ ARM_DEF_DPI_IMM_COND(imm8, rot, rd, rn, s, op, ARMCOND_AL)
+
+/* codegen */
+#define ARM_DPIOP_REG_IMM8ROT_COND(p, op, rd, rn, imm8, rot, cond) \
+ ARM_EMIT(p, ARM_DEF_DPI_IMM_COND((imm8), ((rot) >> 1), (rd), (rn), 0, (op), cond))
+#define ARM_DPIOP_S_REG_IMM8ROT_COND(p, op, rd, rn, imm8, rot, cond) \
+ ARM_EMIT(p, ARM_DEF_DPI_IMM_COND((imm8), ((rot) >> 1), (rd), (rn), 1, (op), cond))
+
+/* inline */
+#define ARM_IASM_DPIOP_REG_IMM8ROT_COND(p, op, rd, rn, imm8, rot, cond) \
+ ARM_IASM(ARM_DEF_DPI_IMM_COND((imm8), ((rot) >> 1), (rd), (rn), 0, (op), cond))
+#define ARM_IASM_DPIOP_S_REG_IMM8ROT_COND(p, op, rd, rn, imm8, rot, cond) \
+ ARM_IASM(ARM_DEF_DPI_IMM_COND((imm8), ((rot) >> 1), (rd), (rn), 1, (op), cond))
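+
+/*
+ * Editorial example: 'rot' is the even rotate-right amount applied to
+ * imm8 (the macro halves it into the 4-bit rotate field), so
+ *
+ *     ARM_DPIOP_REG_IMM8ROT_COND(p, ARMOP_ADD, ARMREG_R0, ARMREG_R1,
+ *                                1, 24, ARMCOND_AL);
+ *
+ * encodes "add r0, r1, #0x100" (1 rotated right by 24 equals 1 << 8).
+ * The friendlier ARM_ADD_REG_IMM8-style wrappers in the generated
+ * arm_dpimacros.h are built on these definitions.
+ */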
+
+
+
+#define ARM_DEF_DPI_REG_IMMSHIFT_COND(rm, shift_type, imm_shift, rd, rn, s, op, cond) \
+ (rm) | \
+ ((shift_type & 3) << 5) | \
+ (((imm_shift) & 0x1F) << 7) | \
+ ((rd) << 12) | \
+ ((rn) << 16) | \
+ ((s) << 20) | \
+ ((op) << 21) | \
+ (ARM_DPI_TAG) | \
+ ARM_DEF_COND(cond)
+
+/* codegen */
+#define ARM_DPIOP_REG_IMMSHIFT_COND(p, op, rd, rn, rm, shift_t, imm_shift, cond) \
+ ARM_EMIT(p, ARM_DEF_DPI_REG_IMMSHIFT_COND((rm), shift_t, imm_shift, (rd), (rn), 0, (op), cond))
+
+#define ARM_DPIOP_S_REG_IMMSHIFT_COND(p, op, rd, rn, rm, shift_t, imm_shift, cond) \
+ ARM_EMIT(p, ARM_DEF_DPI_REG_IMMSHIFT_COND((rm), shift_t, imm_shift, (rd), (rn), 1, (op), cond))
+
+#define ARM_DPIOP_REG_REG_COND(p, op, rd, rn, rm, cond) \
+ ARM_EMIT(p, ARM_DEF_DPI_REG_IMMSHIFT_COND((rm), ARMSHIFT_LSL, 0, (rd), (rn), 0, (op), cond))
+
+#define ARM_DPIOP_S_REG_REG_COND(p, op, rd, rn, rm, cond) \
+ ARM_EMIT(p, ARM_DEF_DPI_REG_IMMSHIFT_COND((rm), ARMSHIFT_LSL, 0, (rd), (rn), 1, (op), cond))
+
+/* inline */
+#define ARM_IASM_DPIOP_REG_IMMSHIFT_COND(p, op, rd, rn, rm, shift_t, imm_shift, cond) \
+ ARM_IASM(ARM_DEF_DPI_REG_IMMSHIFT_COND((rm), shift_t, imm_shift, (rd), (rn), 0, (op), cond))
+
+#define ARM_IASM_DPIOP_S_REG_IMMSHIFT_COND(p, op, rd, rn, rm, shift_t, imm_shift, cond) \
+ ARM_IASM(ARM_DEF_DPI_REG_IMMSHIFT_COND((rm), shift_t, imm_shift, (rd), (rn), 1, (op), cond))
+
+#define ARM_IASM_DPIOP_REG_REG_COND(p, op, rd, rn, rm, cond) \
+ ARM_IASM(ARM_DEF_DPI_REG_IMMSHIFT_COND((rm), ARMSHIFT_LSL, 0, (rd), (rn), 0, (op), cond))
+
+#define ARM_IASM_DPIOP_S_REG_REG_COND(p, op, rd, rn, rm, cond) \
+ ARM_IASM_EMIT(ARM_DEF_DPI_REG_IMMSHIFT_COND((rm), ARMSHIFT_LSL, 0, (rd), (rn), 1, (op), cond))
+
+
+/* Rd := Rn op (Rm shift_type Rs) */
+#define ARM_DEF_DPI_REG_REGSHIFT_COND(rm, shift_type, rs, rd, rn, s, op, cond) \
+ (rm) | \
+ (1 << 4) | \
+ ((shift_type & 3) << 5) | \
+ ((rs) << 8) | \
+ ((rd) << 12) | \
+ ((rn) << 16) | \
+ ((s) << 20) | \
+ ((op) << 21) | \
+ (ARM_DPI_TAG) | \
+ ARM_DEF_COND(cond)
+
+/* codegen */
+#define ARM_DPIOP_REG_REGSHIFT_COND(p, op, rd, rn, rm, shift_t, rs, cond) \
+ ARM_EMIT(p, ARM_DEF_DPI_REG_REGSHIFT_COND((rm), shift_t, (rs), (rd), (rn), 0, (op), cond))
+
+#define ARM_DPIOP_S_REG_REGSHIFT_COND(p, op, rd, rn, rm, shift_t, rs, cond) \
+ ARM_EMIT(p, ARM_DEF_DPI_REG_REGSHIFT_COND((rm), shift_t, (rs), (rd), (rn), 1, (op), cond))
+
+/* inline */
+#define ARM_IASM_DPIOP_REG_REGSHIFT_COND(p, op, rd, rn, rm, shift_t, rs, cond) \
+ ARM_IASM(ARM_DEF_DPI_REG_REGSHIFT_COND((rm), shift_t, (rs), (rd), (rn), 0, (op), cond))
+
+#define ARM_IASM_DPIOP_S_REG_REGSHIFT_COND(p, op, rd, rn, rm, shift_t, rs, cond) \
+ ARM_IASM(ARM_DEF_DPI_REG_REGSHIFT_COND((rm), shift_t, (rs), (rd), (rn), 1, (op), cond))
+
+
+
+/* Multiple register transfer. */
+typedef struct {
+ arminstr_t reg_list : 16; /* bitfield */
+ arminstr_t rn : 4; /* base reg */
+ arminstr_t ls : 1; /* load(1)/store(0) */
+ arminstr_t wb : 1; /* write-back "!" */
+ arminstr_t s : 1; /* restore PSR, force user bit */
+ arminstr_t u : 1; /* up/down */
+ arminstr_t p : 1; /* pre(1)/post(0) index */
+ arminstr_t tag : 3; /* 1 0 0 */
+ arminstr_t cond : 4;
+} ARMInstrMRT;
+
+#define ARM_MRT_ID 4
+#define ARM_MRT_MASK 7 << 25
+#define ARM_MRT_TAG ARM_MRT_ID << 25
+
+#define ARM_DEF_MRT(regs, rn, l, w, s, u, p, cond) \
+ (regs) | \
+ (rn << 16) | \
+ (l << 20) | \
+ (w << 21) | \
+ (s << 22) | \
+ (u << 23) | \
+ (p << 24) | \
+ (ARM_MRT_TAG) | \
+ ARM_DEF_COND(cond)
+
+
+#define ARM_LDM(p, base, regs) ARM_EMIT(p, ARM_DEF_MRT(regs, base, 1, 0, 0, 1, 0, ARMCOND_AL))
+#define ARM_STM(p, base, regs) ARM_EMIT(p, ARM_DEF_MRT(regs, base, 0, 0, 0, 1, 0, ARMCOND_AL))
+
+/* stmdb sp!, {regs} */
+#define ARM_PUSH(p, regs) ARM_EMIT(p, ARM_DEF_MRT(regs, ARMREG_SP, 0, 1, 0, 0, 1, ARMCOND_AL))
+#define ARM_IASM_PUSH(regs) ARM_IASM(ARM_DEF_MRT(regs, ARMREG_SP, 0, 1, 0, 0, 1, ARMCOND_AL))
+
+/* ldmia sp!, {regs} */
+#define ARM_POP(p, regs) ARM_EMIT(p, ARM_DEF_MRT(regs, ARMREG_SP, 1, 1, 0, 1, 0, ARMCOND_AL))
+#define ARM_IASM_POP(regs) ARM_IASM_EMIT(ARM_DEF_MRT(regs, ARMREG_SP, 1, 1, 0, 1, 0, ARMCOND_AL))
+
+/* ldmia sp, {regs} ; (no write-back) */
+#define ARM_POP_NWB(p, regs) ARM_EMIT(p, ARM_DEF_MRT(regs, ARMREG_SP, 1, 0, 0, 1, 0, ARMCOND_AL))
+#define ARM_IASM_POP_NWB(regs) ARM_IASM_EMIT(ARM_DEF_MRT(regs, ARMREG_SP, 1, 0, 0, 1, 0, ARMCOND_AL))
+
+#define ARM_PUSH1(p, r1) ARM_PUSH(p, (1 << r1))
+#define ARM_PUSH2(p, r1, r2) ARM_PUSH(p, (1 << r1) | (1 << r2))
+#define ARM_PUSH3(p, r1, r2, r3) ARM_PUSH(p, (1 << r1) | (1 << r2) | (1 << r3))
+#define ARM_PUSH4(p, r1, r2, r3, r4) ARM_PUSH(p, (1 << r1) | (1 << r2) | (1 << r3) | (1 << r4))
+#define ARM_PUSH5(p, r1, r2, r3, r4, r5) ARM_PUSH(p, (1 << r1) | (1 << r2) | (1 << r3) | (1 << r4) | (1 << r5))
+#define ARM_PUSH6(p, r1, r2, r3, r4, r5, r6) ARM_PUSH(p, (1 << r1) | (1 << r2) | (1 << r3) | (1 << r4) | (1 << r5) | (1 << r6))
+#define ARM_PUSH7(p, r1, r2, r3, r4, r5, r6, r7) ARM_PUSH(p, (1 << r1) | (1 << r2) | (1 << r3) | (1 << r4) | (1 << r5) | (1 << r6) | (1 << r7))
+#define ARM_PUSH8(p, r1, r2, r3, r4, r5, r6, r7, r8) ARM_PUSH(p, (1 << r1) | (1 << r2) | (1 << r3) | (1 << r4) | (1 << r5) | (1 << r6) | (1 << r7) | (1 << r8))
+
+#define ARM_POP8(p, r1, r2, r3, r4, r5, r6, r7, r8) ARM_POP(p, (1 << r1) | (1 << r2) | (1 << r3) | (1 << r4) | (1 << r5) | (1 << r6) | (1 << r7) | (1 << r8))
+#define ARM_POP7(p, r1, r2, r3, r4, r5, r6, r7) ARM_POP(p, (1 << r1) | (1 << r2) | (1 << r3) | (1 << r4) | (1 << r5) | (1 << r6) | (1 << r7))
+#define ARM_POP6(p, r1, r2, r3, r4, r5, r6) ARM_POP(p, (1 << r1) | (1 << r2) | (1 << r3) | (1 << r4) | (1 << r5) | (1 << r6))
+#define ARM_POP5(p, r1, r2, r3, r4, r5) ARM_POP(p, (1 << r1) | (1 << r2) | (1 << r3) | (1 << r4) | (1 << r5))
+#define ARM_POP4(p, r1, r2, r3, r4) ARM_POP(p, (1 << r1) | (1 << r2) | (1 << r3) | (1 << r4))
+#define ARM_POP3(p, r1, r2, r3) ARM_POP(p, (1 << r1) | (1 << r2) | (1 << r3))
+#define ARM_POP2(p, r1, r2) ARM_POP(p, (1 << r1) | (1 << r2))
+#define ARM_POP1(p, r1) ARM_POP(p, (1 << r1))
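+
+/*
+ * Editorial example: the prologue/epilogue helpers in arm-codegen.c are
+ * built from these forms, e.g.:
+ *
+ *     ARM_PUSH2(p, ARMREG_IP, ARMREG_LR);                  (stmdb sp!, {ip, lr})
+ *     ...
+ *     ARM_POP_NWB(p, (1 << ARMREG_SP) | (1 << ARMREG_PC)); (ldmia sp, {sp, pc})
+ */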
+
+
+/* Multiply instructions */
+typedef struct {
+ arminstr_t rm : 4;
+ arminstr_t tag2 : 4; /* 9 */
+ arminstr_t rs : 4;
+ arminstr_t rn : 4;
+ arminstr_t rd : 4;
+ arminstr_t s : 1;
+ arminstr_t opcode : 3;
+ arminstr_t tag : 4;
+ arminstr_t cond : 4;
+} ARMInstrMul;
+
+#define ARM_MUL_ID 0
+#define ARM_MUL_ID2 9
+#define ARM_MUL_MASK ((0xF << 24) | (0xF << 4))
+#define ARM_MUL_TAG ((ARM_MUL_ID << 24) | (ARM_MUL_ID2 << 4))
+
+#define ARM_DEF_MUL_COND(op, rd, rm, rs, rn, s, cond) \
+ (rm) | \
+ ((rs) << 8) | \
+ ((rn) << 12) | \
+ ((rd) << 16) | \
+ ((s & 1) << 17) | \
+ ((op & 7) << 18) | \
+ ARM_MUL_TAG | \
+ ARM_DEF_COND(cond)
+
+/* Rd := (Rm * Rs)[31:0]; 32 x 32 -> 32 */
+#define ARM_MUL_COND(p, rd, rm, rs, cond) \
+ ARM_EMIT(p, ARM_DEF_MUL_COND(ARMOP_MUL, rd, rm, rs, 0, 0, cond))
+#define ARM_MUL(p, rd, rm, rs) \
+ ARM_MUL_COND(p, rd, rm, rs, ARMCOND_AL)
+#define ARM_MULS_COND(p, rd, rm, rs, cond) \
+ ARM_EMIT(p, ARM_DEF_MUL_COND(ARMOP_MUL, rd, rm, rs, 0, 1, cond))
+#define ARM_MULS(p, rd, rm, rs) \
+ ARM_MULS_COND(p, rd, rm, rs, ARMCOND_AL)
+#define ARM_MUL_REG_REG(p, rd, rm, rs) ARM_MUL(p, rd, rm, rs)
+#define ARM_MULS_REG_REG(p, rd, rm, rs) ARM_MULS(p, rd, rm, rs)
+
+/* inline */
+#define ARM_IASM_MUL_COND(rd, rm, rs, cond) \
+ ARM_IASM_EMIT(ARM_DEF_MUL_COND(ARMOP_MUL, rd, rm, rs, 0, 0, cond))
+#define ARM_IASM_MUL(rd, rm, rs) \
+ ARM_IASM_MUL_COND(rd, rm, rs, ARMCOND_AL)
+#define ARM_IASM_MULS_COND(rd, rm, rs, cond) \
+ ARM_IASM_EMIT(ARM_DEF_MUL_COND(ARMOP_MUL, rd, rm, rs, 0, 1, cond))
+#define ARM_IASM_MULS(rd, rm, rs) \
+ ARM_IASM_MULS_COND(rd, rm, rs, ARMCOND_AL)
+
+
+/* Rd := (Rm * Rs) + Rn; 32x32+32->32 */
+#define ARM_MLA_COND(p, rd, rm, rs, rn, cond) \
+ ARM_EMIT(p, ARM_DEF_MUL_COND(ARMOP_MLA, rd, rm, rs, rn, 0, cond))
+#define ARM_MLA(p, rd, rm, rs, rn) \
+ ARM_MLA_COND(p, rd, rm, rs, rn, ARMCOND_AL)
+#define ARM_MLAS_COND(p, rd, rm, rs, rn, cond) \
+ ARM_EMIT(p, ARM_DEF_MUL_COND(ARMOP_MLA, rd, rm, rs, rn, 1, cond))
+#define ARM_MLAS(p, rd, rm, rs, rn) \
+ ARM_MLAS_COND(p, rd, rm, rs, rn, ARMCOND_AL)
+
+/* inline */
+#define ARM_IASM_MLA_COND(rd, rm, rs, rn, cond) \
+ ARM_IASM_EMIT(ARM_DEF_MUL_COND(ARMOP_MLA, rd, rm, rs, rn, 0, cond))
+#define ARM_IASM_MLA(rd, rm, rs, rn) \
+ ARM_IASM_MLA_COND(rd, rm, rs, rn, ARMCOND_AL)
+#define ARM_IASM_MLAS_COND(rd, rm, rs, rn, cond) \
+ ARM_IASM_EMIT(ARM_DEF_MUL_COND(ARMOP_MLA, rd, rm, rs, rn, 1, cond))
+#define ARM_IASM_MLAS(rd, rm, rs, rn) \
+ ARM_IASM_MLAS_COND(rd, rm, rs, rn, ARMCOND_AL)
+
+
+
+/* Word/byte transfer */
+typedef union {
+ ARMDPI_op2_reg_imm op2_reg_imm;
+ struct {
+ arminstr_t op2_imm : 12;
+ arminstr_t rd : 4;
+ arminstr_t rn : 4;
+ arminstr_t ls : 1;
+ arminstr_t wb : 1;
+ arminstr_t b : 1;
+ arminstr_t u : 1; /* down(0) / up(1) */
+ arminstr_t p : 1; /* post-index(0) / pre-index(1) */
+ arminstr_t type : 1; /* imm(0) / register(1) */
+ arminstr_t tag : 2; /* 0 1 */
+ arminstr_t cond : 4;
+ } all;
+} ARMInstrWXfer;
+
+#define ARM_WXFER_ID 1
+#define ARM_WXFER_MASK 3 << 26
+#define ARM_WXFER_TAG ARM_WXFER_ID << 26
+
+
+#define ARM_DEF_WXFER_IMM(imm12, rd, rn, ls, wb, b, p, cond) \
+ ((((int)imm12) < 0) ? -(int)(imm12) : (imm12)) | \
+ ((rd) << 12) | \
+ ((rn) << 16) | \
+ ((ls) << 20) | \
+ ((wb) << 21) | \
+ ((b) << 22) | \
+ (((int)(imm12) >= 0) << 23) | \
+ ((p) << 24) | \
+ ARM_WXFER_TAG | \
+ ARM_DEF_COND(cond)
+
+#define ARM_WXFER_MAX_OFFS 0xFFF
+
+/* this macro checks for imm12 bounds */
+#define ARM_EMIT_WXFER_IMM(ptr, imm12, rd, rn, ls, wb, b, p, cond) \
+ do { \
+ int _imm12 = (int)(imm12) < -ARM_WXFER_MAX_OFFS \
+ ? -ARM_WXFER_MAX_OFFS \
+ : (int)(imm12) > ARM_WXFER_MAX_OFFS \
+ ? ARM_WXFER_MAX_OFFS \
+ : (int)(imm12); \
+ ARM_EMIT((ptr), \
+ ARM_DEF_WXFER_IMM(_imm12, (rd), (rn), (ls), (wb), (b), (p), (cond))); \
+ } while (0)
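+
+/*
+ * Editorial note: the macro above silently clamps an out-of-range
+ * displacement to +/-0xFFF instead of failing, so callers needing exact
+ * addressing must split large offsets themselves, for example:
+ */
+#if 0 /* illustration only */
+	p = arm_mov_reg_imm32(p, ARMREG_IP, 0x12345);        /* offset > 0xFFF */
+	ARM_LDR_REG_REG(p, ARMREG_R0, ARMREG_R1, ARMREG_IP); /* defined below */
+#endif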
+
+
+/* LDRx */
+/* immediate offset, post-index */
+#define ARM_LDR_IMM_POST_COND(p, rd, rn, imm, cond) \
+ ARM_EMIT(p, ARM_DEF_WXFER_IMM(imm, rd, rn, ARMOP_LDR, 0, 0, 0, cond))
+
+#define ARM_LDR_IMM_POST(p, rd, rn, imm) ARM_LDR_IMM_POST_COND(p, rd, rn, imm, ARMCOND_AL)
+
+#define ARM_LDRB_IMM_POST_COND(p, rd, rn, imm, cond) \
+ ARM_EMIT(p, ARM_DEF_WXFER_IMM(imm, rd, rn, ARMOP_LDR, 0, 1, 0, cond))
+
+#define ARM_LDRB_IMM_POST(p, rd, rn, imm) ARM_LDRB_IMM_POST_COND(p, rd, rn, imm, ARMCOND_AL)
+
+/* immediate offset, pre-index */
+#define ARM_LDR_IMM_COND(p, rd, rn, imm, cond) \
+ ARM_EMIT(p, ARM_DEF_WXFER_IMM(imm, rd, rn, ARMOP_LDR, 0, 0, 1, cond))
+
+#define ARM_LDR_IMM(p, rd, rn, imm) ARM_LDR_IMM_COND(p, rd, rn, imm, ARMCOND_AL)
+
+#define ARM_LDRB_IMM_COND(p, rd, rn, imm, cond) \
+ ARM_EMIT(p, ARM_DEF_WXFER_IMM(imm, rd, rn, ARMOP_LDR, 0, 1, 1, cond))
+
+#define ARM_LDRB_IMM(p, rd, rn, imm) ARM_LDRB_IMM_COND(p, rd, rn, imm, ARMCOND_AL)
+
+/* STRx */
+/* immediate offset, post-index */
+#define ARM_STR_IMM_POST_COND(p, rd, rn, imm, cond) \
+ ARM_EMIT(p, ARM_DEF_WXFER_IMM(imm, rd, rn, ARMOP_STR, 0, 0, 0, cond))
+
+#define ARM_STR_IMM_POST(p, rd, rn, imm) ARM_STR_IMM_POST_COND(p, rd, rn, imm, ARMCOND_AL)
+
+#define ARM_STRB_IMM_POST_COND(p, rd, rn, imm, cond) \
+ ARM_EMIT(p, ARM_DEF_WXFER_IMM(imm, rd, rn, ARMOP_STR, 0, 1, 0, cond))
+
+#define ARM_STRB_IMM_POST(p, rd, rn, imm) ARM_STRB_IMM_POST_COND(p, rd, rn, imm, ARMCOND_AL)
+
+/* immediate offset, pre-index */
+#define ARM_STR_IMM_COND(p, rd, rn, imm, cond) \
+ ARM_EMIT_WXFER_IMM(p, imm, rd, rn, ARMOP_STR, 0, 0, 1, cond)
+/* ARM_EMIT(p, ARM_DEF_WXFER_IMM(imm, rd, rn, ARMOP_STR, 0, 0, 1, cond)) */
+
+#define ARM_STR_IMM(p, rd, rn, imm) ARM_STR_IMM_COND(p, rd, rn, imm, ARMCOND_AL)
+
+#define ARM_STRB_IMM_COND(p, rd, rn, imm, cond) \
+ ARM_EMIT(p, ARM_DEF_WXFER_IMM(imm, rd, rn, ARMOP_STR, 0, 1, 1, cond))
+
+#define ARM_STRB_IMM(p, rd, rn, imm) ARM_STRB_IMM_COND(p, rd, rn, imm, ARMCOND_AL)
+
+/* write-back */
+#define ARM_STR_IMM_WB_COND(p, rd, rn, imm, cond) \
+ ARM_EMIT_WXFER_IMM(p, imm, rd, rn, ARMOP_STR, 1, 0, 1, cond)
+#define ARM_STR_IMM_WB(p, rd, rn, imm) ARM_STR_IMM_WB_COND(p, rd, rn, imm, ARMCOND_AL)
+
+
+#define ARM_DEF_WXFER_REG_REG_UPDOWN_COND(rm, shift_type, shift, rd, rn, ls, wb, b, u, p, cond) \
+ (rm) | \
+ ((shift_type) << 5) | \
+ ((shift) << 7) | \
+ ((rd) << 12) | \
+ ((rn) << 16) | \
+ ((ls) << 20) | \
+ ((wb) << 21) | \
+ ((b) << 22) | \
+ ((u) << 23) | \
+ ((p) << 24) | \
+ (1 << 25) | \
+ ARM_WXFER_TAG | \
+ ARM_DEF_COND(cond)
+
+#define ARM_DEF_WXFER_REG_REG_COND(rm, shift_type, shift, rd, rn, ls, wb, b, p, cond) \
+ ARM_DEF_WXFER_REG_REG_UPDOWN_COND(rm, shift_type, shift, rd, rn, ls, wb, b, ARM_UP, p, cond)
+#define ARM_DEF_WXFER_REG_MINUS_REG_COND(rm, shift_type, shift, rd, rn, ls, wb, b, p, cond) \
+ ARM_DEF_WXFER_REG_REG_UPDOWN_COND(rm, shift_type, shift, rd, rn, ls, wb, b, ARM_DOWN, p, cond)
+
+
+#define ARM_LDR_REG_REG_SHIFT_COND(p, rd, rn, rm, shift_type, shift, cond) \
+ ARM_EMIT(p, ARM_DEF_WXFER_REG_REG_COND(rm, shift_type, shift, rd, rn, ARMOP_LDR, 0, 0, 1, cond))
+#define ARM_LDR_REG_REG_SHIFT(p, rd, rn, rm, shift_type, shift) \
+ ARM_LDR_REG_REG_SHIFT_COND(p, rd, rn, rm, shift_type, shift, ARMCOND_AL)
+#define ARM_LDR_REG_REG(p, rd, rn, rm) \
+ ARM_LDR_REG_REG_SHIFT(p, rd, rn, rm, ARMSHIFT_LSL, 0)
+
+#define ARM_LDRB_REG_REG_SHIFT_COND(p, rd, rn, rm, shift_type, shift, cond) \
+ ARM_EMIT(p, ARM_DEF_WXFER_REG_REG_COND(rm, shift_type, shift, rd, rn, ARMOP_LDR, 0, 1, 1, cond))
+#define ARM_LDRB_REG_REG_SHIFT(p, rd, rn, rm, shift_type, shift) \
+ ARM_LDRB_REG_REG_SHIFT_COND(p, rd, rn, rm, shift_type, shift, ARMCOND_AL)
+#define ARM_LDRB_REG_REG(p, rd, rn, rm) \
+ ARM_LDRB_REG_REG_SHIFT(p, rd, rn, rm, ARMSHIFT_LSL, 0)
+
+#define ARM_STR_REG_REG_SHIFT_COND(p, rd, rn, rm, shift_type, shift, cond) \
+ ARM_EMIT(p, ARM_DEF_WXFER_REG_REG_COND(rm, shift_type, shift, rd, rn, ARMOP_STR, 0, 0, 1, cond))
+#define ARM_STR_REG_REG_SHIFT(p, rd, rn, rm, shift_type, shift) \
+ ARM_STR_REG_REG_SHIFT_COND(p, rd, rn, rm, shift_type, shift, ARMCOND_AL)
+#define ARM_STR_REG_REG(p, rd, rn, rm) \
+ ARM_STR_REG_REG_SHIFT(p, rd, rn, rm, ARMSHIFT_LSL, 0)
+
+/* store byte */
+#define ARM_STRB_REG_REG_SHIFT_COND(p, rd, rn, rm, shift_type, shift, cond) \
+ ARM_EMIT(p, ARM_DEF_WXFER_REG_REG_COND(rm, shift_type, shift, rd, rn, ARMOP_STR, 0, 1, 1, cond))
+#define ARM_STRB_REG_REG_SHIFT(p, rd, rn, rm, shift_type, shift) \
+ ARM_STRB_REG_REG_SHIFT_COND(p, rd, rn, rm, shift_type, shift, ARMCOND_AL)
+#define ARM_STRB_REG_REG(p, rd, rn, rm) \
+ ARM_STRB_REG_REG_SHIFT(p, rd, rn, rm, ARMSHIFT_LSL, 0)
+
+
+/* ARMv4+ */
+/* Half-word or byte (signed) transfer. */
+typedef struct {
+ arminstr_t rm : 4; /* imm_lo */
+ arminstr_t tag3 : 1; /* 1 */
+ arminstr_t h : 1; /* half-word or byte */
+ arminstr_t s : 1; /* sign-extend or zero-extend */
+ arminstr_t tag2 : 1; /* 1 */
+ arminstr_t imm_hi : 4;
+ arminstr_t rd : 4;
+ arminstr_t rn : 4;
+ arminstr_t ls : 1;
+ arminstr_t wb : 1;
+ arminstr_t type : 1; /* imm(1) / reg(0) */
+ arminstr_t u : 1; /* +- */
+ arminstr_t p : 1; /* pre/post-index */
+ arminstr_t tag : 3;
+ arminstr_t cond : 4;
+} ARMInstrHXfer;
+
+#define ARM_HXFER_ID 0
+#define ARM_HXFER_ID2 1
+#define ARM_HXFER_ID3 1
+#define ARM_HXFER_MASK ((0x7 << 25) | (0x9 << 4))
+#define ARM_HXFER_TAG ((ARM_HXFER_ID << 25) | (ARM_HXFER_ID2 << 7) | (ARM_HXFER_ID3 << 4))
+
+#define ARM_DEF_HXFER_IMM_COND(imm, h, s, rd, rn, ls, wb, p, cond) \
+	((imm) < 0 ? (-(imm)) & 0xF : (imm) & 0xF) | \
+ ((h) << 5) | \
+ ((s) << 6) | \
+	((imm) < 0 ? ((-(imm)) << 4) & 0xF00 : ((imm) << 4) & 0xF00) | \
+ ((rd) << 12) | \
+ ((rn) << 16) | \
+ ((ls) << 20) | \
+ ((wb) << 21) | \
+ (1 << 22) | \
+ (((int)(imm) >= 0) << 23) | \
+ ((p) << 24) | \
+ ARM_HXFER_TAG | \
+ ARM_DEF_COND(cond)
+
+#define ARM_LDRH_IMM_COND(p, rd, rn, imm, cond) \
+ ARM_EMIT(p, ARM_DEF_HXFER_IMM_COND(imm, 1, 0, rd, rn, ARMOP_LDR, 0, 1, cond))
+#define ARM_LDRH_IMM(p, rd, rn, imm) \
+ ARM_LDRH_IMM_COND(p, rd, rn, imm, ARMCOND_AL)
+#define ARM_LDRSH_IMM_COND(p, rd, rn, imm, cond) \
+ ARM_EMIT(p, ARM_DEF_HXFER_IMM_COND(imm, 1, 1, rd, rn, ARMOP_LDR, 0, 1, cond))
+#define ARM_LDRSH_IMM(p, rd, rn, imm) \
+ ARM_LDRSH_IMM_COND(p, rd, rn, imm, ARMCOND_AL)
+#define ARM_LDRSB_IMM_COND(p, rd, rn, imm, cond) \
+ ARM_EMIT(p, ARM_DEF_HXFER_IMM_COND(imm, 0, 1, rd, rn, ARMOP_LDR, 0, 1, cond))
+#define ARM_LDRSB_IMM(p, rd, rn, imm) \
+ ARM_LDRSB_IMM_COND(p, rd, rn, imm, ARMCOND_AL)
+
+
+#define ARM_STRH_IMM_COND(p, rd, rn, imm, cond) \
+ ARM_EMIT(p, ARM_DEF_HXFER_IMM_COND(imm, 1, 0, rd, rn, ARMOP_STR, 0, 1, cond))
+#define ARM_STRH_IMM(p, rd, rn, imm) \
+ ARM_STRH_IMM_COND(p, rd, rn, imm, ARMCOND_AL)
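+
+/*
+ * Editorial example: loading a signed 16-bit field and storing it back.
+ * The immediate is split into two 4-bit halves above, so the reach is
+ * only +/-255 bytes (vs. +/-4095 for the word/byte forms):
+ *
+ *     ARM_LDRSH_IMM(p, ARMREG_R0, ARMREG_R1, 6);   (ldrsh r0, [r1, #6])
+ *     ARM_STRH_IMM(p, ARMREG_R0, ARMREG_R1, 6);    (strh  r0, [r1, #6])
+ */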
+
+
+#define ARM_DEF_HXFER_REG_REG_UPDOWN_COND(rm, h, s, rd, rn, ls, wb, u, p, cond) \
+ ((rm) & 0xF) | \
+ ((h) << 5) | \
+ ((s) << 6) | \
+ ((rd) << 12) | \
+ ((rn) << 16) | \
+ ((ls) << 20) | \
+ ((wb) << 21) | \
+ (0 << 22) | \
+ ((u) << 23) | \
+ ((p) << 24) | \
+ ARM_HXFER_TAG | \
+ ARM_DEF_COND(cond)
+
+#define ARM_DEF_HXFER_REG_REG_COND(rm, h, s, rd, rn, ls, wb, p, cond) \
+ ARM_DEF_HXFER_REG_REG_UPDOWN_COND(rm, h, s, rd, rn, ls, wb, ARM_UP, p, cond)
+#define ARM_DEF_HXFER_REG_MINUS_REG_COND(rm, h, s, rd, rn, ls, wb, p, cond) \
+ ARM_DEF_HXFER_REG_REG_UPDOWN_COND(rm, h, s, rd, rn, ls, wb, ARM_DOWN, p, cond)
+
+#define ARM_LDRH_REG_REG_COND(p, rd, rm, rn, cond) \
+ ARM_EMIT(p, ARM_DEF_HXFER_REG_REG_COND(rm, 1, 0, rd, rn, ARMOP_LDR, 0, 1, cond))
+#define ARM_LDRH_REG_REG(p, rd, rm, rn) \
+ ARM_LDRH_REG_REG_COND(p, rd, rm, rn, ARMCOND_AL)
+#define ARM_LDRSH_REG_REG_COND(p, rd, rm, rn, cond) \
+ ARM_EMIT(p, ARM_DEF_HXFER_REG_REG_COND(rm, 1, 1, rd, rn, ARMOP_LDR, 0, 1, cond))
+#define ARM_LDRSH_REG_REG(p, rd, rm, rn) \
+ ARM_LDRSH_REG_REG_COND(p, rd, rm, rn, ARMCOND_AL)
+#define ARM_LDRSB_REG_REG_COND(p, rd, rm, rn, cond) \
+ ARM_EMIT(p, ARM_DEF_HXFER_REG_REG_COND(rm, 0, 1, rd, rn, ARMOP_LDR, 0, 1, cond))
+#define ARM_LDRSB_REG_REG(p, rd, rm, rn) ARM_LDRSB_REG_REG_COND(p, rd, rm, rn, ARMCOND_AL)
+
+#define ARM_STRH_REG_REG_COND(p, rd, rm, rn, cond) \
+ ARM_EMIT(p, ARM_DEF_HXFER_REG_REG_COND(rm, 1, 0, rd, rn, ARMOP_STR, 0, 1, cond))
+#define ARM_STRH_REG_REG(p, rd, rm, rn) \
+ ARM_STRH_REG_REG_COND(p, rd, rm, rn, ARMCOND_AL)
+
+
+
+/* Swap */
+typedef struct {
+ arminstr_t rm : 4;
+ arminstr_t tag3 : 8; /* 0x9 */
+ arminstr_t rd : 4;
+ arminstr_t rn : 4;
+ arminstr_t tag2 : 2;
+ arminstr_t b : 1;
+ arminstr_t tag : 5; /* 0x2 */
+ arminstr_t cond : 4;
+} ARMInstrSwap;
+
+#define ARM_SWP_ID 2
+#define ARM_SWP_ID2 9
+#define ARM_SWP_MASK ((0x1F << 23) | (3 << 20) | (0xFF << 4))
+#define ARM_SWP_TAG ((ARM_SWP_ID << 23) | (ARM_SWP_ID2 << 4))
+
+
+
+/* Software interrupt */
+typedef struct {
+ arminstr_t num : 24;
+ arminstr_t tag : 4;
+ arminstr_t cond : 4;
+} ARMInstrSWI;
+
+#define ARM_SWI_ID 0xF
+#define ARM_SWI_MASK (0xF << 24)
+#define ARM_SWI_TAG (ARM_SWI_ID << 24)
+
+
+
+/* Co-processor Data Processing */
+typedef struct {
+ arminstr_t crm : 4;
+ arminstr_t tag2 : 1; /* 0 */
+ arminstr_t op2 : 3;
+ arminstr_t cpn : 4; /* CP number */
+ arminstr_t crd : 4;
+ arminstr_t crn : 4;
+ arminstr_t op : 4;
+ arminstr_t tag : 4; /* 0xE */
+ arminstr_t cond : 4;
+} ARMInstrCDP;
+
+#define ARM_CDP_ID 0xE
+#define ARM_CDP_ID2 0
+#define ARM_CDP_MASK ((0xF << 24) | (1 << 4))
+#define ARM_CDP_TAG ((ARM_CDP_ID << 24) | (ARM_CDP_ID2 << 4))
+
+
+/* Co-processor Data Transfer (ldc/stc) */
+typedef struct {
+ arminstr_t offs : 8;
+ arminstr_t cpn : 4;
+ arminstr_t crd : 4;
+ arminstr_t rn : 4;
+ arminstr_t ls : 1;
+ arminstr_t wb : 1;
+ arminstr_t n : 1;
+ arminstr_t u : 1;
+ arminstr_t p : 1;
+ arminstr_t tag : 3;
+ arminstr_t cond : 4;
+} ARMInstrCDT;
+
+#define ARM_CDT_ID 6
+#define ARM_CDT_MASK (7 << 25)
+#define ARM_CDT_TAG (ARM_CDT_ID << 25)
+
+
+/* Co-processor Register Transfer (mcr/mrc) */
+typedef struct {
+ arminstr_t crm : 4;
+ arminstr_t tag2 : 1;
+ arminstr_t op2 : 3;
+ arminstr_t cpn : 4;
+ arminstr_t rd : 4;
+ arminstr_t crn : 4;
+ arminstr_t ls : 1;
+ arminstr_t op1 : 3;
+ arminstr_t tag : 4;
+ arminstr_t cond : 4;
+} ARMInstrCRT;
+
+#define ARM_CRT_ID 0xE
+#define ARM_CRT_ID2 0x1
+#define ARM_CRT_MASK ((0xF << 24) | (1 << 4))
+#define ARM_CRT_TAG ((ARM_CRT_ID << 24) | (ARM_CRT_ID2 << 4))
+
+/* Move register to PSR. */
+typedef union {
+ ARMDPI_op2_imm op2_imm;
+ struct {
+ arminstr_t rm : 4;
+ arminstr_t pad : 8; /* 0 */
+ arminstr_t tag4 : 4; /* 0xF */
+ arminstr_t fld : 4;
+ arminstr_t tag3 : 2; /* 0x2 */
+ arminstr_t sel : 1;
+ arminstr_t tag2 : 2; /* 0x2 */
+ arminstr_t type : 1;
+ arminstr_t tag : 2; /* 0 */
+ arminstr_t cond : 4;
+ } all;
+} ARMInstrMSR;
+
+#define ARM_MSR_ID 0
+#define ARM_MSR_ID2 2
+#define ARM_MSR_ID3 2
+#define ARM_MSR_ID4 0xF
+#define ARM_MSR_MASK ((3 << 26) | \
+ (3 << 23) | \
+ (3 << 20) | \
+ (0xF << 12))
+#define ARM_MSR_TAG ((ARM_MSR_ID << 26) | \
+ (ARM_MSR_ID2 << 23) | \
+ (ARM_MSR_ID3 << 20) | \
+ (ARM_MSR_ID4 << 12))
+
+
+/* Move PSR to register. */
+typedef struct {
+ arminstr_t tag3 : 12;
+ arminstr_t rd : 4;
+ arminstr_t tag2 : 6;
+ arminstr_t sel : 1; /* CPSR | SPSR */
+ arminstr_t tag : 5;
+ arminstr_t cond : 4;
+} ARMInstrMRS;
+
+#define ARM_MRS_ID 2
+#define ARM_MRS_ID2 0xF
+#define ARM_MRS_ID3 0
+#define ARM_MRS_MASK ((0x1F << 23) | (0x3F << 16) | 0xFFF)
+#define ARM_MRS_TAG ((ARM_MRS_ID << 23) | (ARM_MRS_ID2 << 16) | ARM_MRS_ID3)
+
+
+
+#include "mono/arch/arm/arm_dpimacros.h"
+
+#define ARM_NOP(p) ARM_MOV_REG_REG(p, ARMREG_R0, ARMREG_R0)
+
+
+#define ARM_SHL_IMM_COND(p, rd, rm, imm, cond) \
+ ARM_MOV_REG_IMMSHIFT_COND(p, rd, rm, ARMSHIFT_LSL, imm, cond)
+#define ARM_SHL_IMM(p, rd, rm, imm) \
+ ARM_SHL_IMM_COND(p, rd, rm, imm, ARMCOND_AL)
+#define ARM_SHLS_IMM_COND(p, rd, rm, imm, cond) \
+ ARM_MOVS_REG_IMMSHIFT_COND(p, rd, rm, ARMSHIFT_LSL, imm, cond)
+#define ARM_SHLS_IMM(p, rd, rm, imm) \
+ ARM_SHLS_IMM_COND(p, rd, rm, imm, ARMCOND_AL)
+
+#define ARM_SHR_IMM_COND(p, rd, rm, imm, cond) \
+ ARM_MOV_REG_IMMSHIFT_COND(p, rd, rm, ARMSHIFT_LSR, imm, cond)
+#define ARM_SHR_IMM(p, rd, rm, imm) \
+ ARM_SHR_IMM_COND(p, rd, rm, imm, ARMCOND_AL)
+#define ARM_SHRS_IMM_COND(p, rd, rm, imm, cond) \
+ ARM_MOVS_REG_IMMSHIFT_COND(p, rd, rm, ARMSHIFT_LSR, imm, cond)
+#define ARM_SHRS_IMM(p, rd, rm, imm) \
+ ARM_SHRS_IMM_COND(p, rd, rm, imm, ARMCOND_AL)
+
+#define ARM_SAR_IMM_COND(p, rd, rm, imm, cond) \
+ ARM_MOV_REG_IMMSHIFT_COND(p, rd, rm, ARMSHIFT_ASR, imm, cond)
+#define ARM_SAR_IMM(p, rd, rm, imm) \
+ ARM_SAR_IMM_COND(p, rd, rm, imm, ARMCOND_AL)
+#define ARM_SARS_IMM_COND(p, rd, rm, imm, cond) \
+ ARM_MOVS_REG_IMMSHIFT_COND(p, rd, rm, ARMSHIFT_ASR, imm, cond)
+#define ARM_SARS_IMM(p, rd, rm, imm) \
+ ARM_SARS_IMM_COND(p, rd, rm, imm, ARMCOND_AL)
+
+#define ARM_ROR_IMM_COND(p, rd, rm, imm, cond) \
+ ARM_MOV_REG_IMMSHIFT_COND(p, rd, rm, ARMSHIFT_ROR, imm, cond)
+#define ARM_ROR_IMM(p, rd, rm, imm) \
+ ARM_ROR_IMM_COND(p, rd, rm, imm, ARMCOND_AL)
+#define ARM_RORS_IMM_COND(p, rd, rm, imm, cond) \
+ ARM_MOVS_REG_IMMSHIFT_COND(p, rd, rm, ARMSHIFT_ROR, imm, cond)
+#define ARM_RORS_IMM(p, rd, rm, imm) \
+ ARM_RORS_IMM_COND(p, rd, rm, imm, ARMCOND_AL)
+
+#define ARM_SHL_REG_COND(p, rd, rm, rs, cond) \
+ ARM_MOV_REG_REGSHIFT_COND(p, rd, rm, ARMSHIFT_LSL, rs, cond)
+#define ARM_SHL_REG(p, rd, rm, rs) \
+ ARM_SHL_REG_COND(p, rd, rm, rs, ARMCOND_AL)
+#define ARM_SHLS_REG_COND(p, rd, rm, rs, cond) \
+ ARM_MOVS_REG_REGSHIFT_COND(p, rd, rm, ARMSHIFT_LSL, rs, cond)
+#define ARM_SHLS_REG(p, rd, rm, rs) \
+ ARM_SHLS_REG_COND(p, rd, rm, rs, ARMCOND_AL)
+#define ARM_SHLS_REG_REG(p, rd, rm, rs) ARM_SHLS_REG(p, rd, rm, rs)
+
+#define ARM_SHR_REG_COND(p, rd, rm, rs, cond) \
+ ARM_MOV_REG_REGSHIFT_COND(p, rd, rm, ARMSHIFT_LSR, rs, cond)
+#define ARM_SHR_REG(p, rd, rm, rs) \
+ ARM_SHR_REG_COND(p, rd, rm, rs, ARMCOND_AL)
+#define ARM_SHRS_REG_COND(p, rd, rm, rs, cond) \
+ ARM_MOVS_REG_REGSHIFT_COND(p, rd, rm, ARMSHIFT_LSR, rs, cond)
+#define ARM_SHRS_REG(p, rd, rm, rs) \
+ ARM_SHRS_REG_COND(p, rd, rm, rs, ARMCOND_AL)
+#define ARM_SHRS_REG_REG(p, rd, rm, rs) ARM_SHRS_REG(p, rd, rm, rs)
+
+#define ARM_SAR_REG_COND(p, rd, rm, rs, cond) \
+ ARM_MOV_REG_REGSHIFT_COND(p, rd, rm, ARMSHIFT_ASR, rs, cond)
+#define ARM_SAR_REG(p, rd, rm, rs) \
+ ARM_SAR_REG_COND(p, rd, rm, rs, ARMCOND_AL)
+#define ARM_SARS_REG_COND(p, rd, rm, rs, cond) \
+ ARM_MOVS_REG_REGSHIFT_COND(p, rd, rm, ARMSHIFT_ASR, rs, cond)
+#define ARM_SARS_REG(p, rd, rm, rs) \
+ ARM_SARS_REG_COND(p, rd, rm, rs, ARMCOND_AL)
+#define ARM_SARS_REG_REG(p, rd, rm, rs) ARM_SARS_REG(p, rd, rm, rs)
+
+#define ARM_ROR_REG_COND(p, rd, rm, rs, cond) \
+ ARM_MOV_REG_REGSHIFT_COND(p, rd, rm, ARMSHIFT_ROR, rs, cond)
+#define ARM_ROR_REG(p, rd, rm, rs) \
+ ARM_ROR_REG_COND(p, rd, rm, rs, ARMCOND_AL)
+#define ARM_RORS_REG_COND(p, rd, rm, rs, cond) \
+ ARM_MOVS_REG_REGSHIFT_COND(p, rd, rm, ARMSHIFT_ROR, rs, cond)
+#define ARM_RORS_REG(p, rd, rm, rs) \
+ ARM_RORS_REG_COND(p, rd, rm, rs, ARMCOND_AL)
+#define ARM_RORS_REG_REG(p, rd, rm, rs) ARM_RORS_REG(p, rd, rm, rs)
+
+#ifdef __native_client_codegen__
+#define ARM_DBRK(p) ARM_EMIT(p, 0xE7FEDEF0)
+#else
+#define ARM_DBRK(p) ARM_EMIT(p, 0xE6000010)
+#endif
+#define ARM_IASM_DBRK() ARM_IASM_EMIT(0xE6000010)
+
+#define ARM_INC(p, reg) ARM_ADD_REG_IMM8(p, reg, reg, 1)
+#define ARM_DEC(p, reg) ARM_SUB_REG_IMM8(p, reg, reg, 1)
+
+#define ARM_MLS(p, rd, rn, rm, ra) ARM_EMIT((p), (ARMCOND_AL << 28) | (0x6 << 20) | ((rd) << 16) | ((ra) << 12) | ((rm) << 8) | (0x9 << 4) | ((rn) << 0))
+
+/* ARM V5 */
+
+/* Count leading zeros, CLZ{cond} Rd, Rm */
+typedef struct {
+ arminstr_t rm : 4;
+ arminstr_t tag2 : 8;
+ arminstr_t rd : 4;
+ arminstr_t tag : 12;
+ arminstr_t cond : 4;
+} ARMInstrCLZ;
+
+#define ARM_CLZ_ID 0x16F
+#define ARM_CLZ_ID2 0xF1
+#define ARM_CLZ_MASK ((0xFFF << 16) | (0xFF << 4))
+#define ARM_CLZ_TAG ((ARM_CLZ_ID << 16) | (ARM_CLZ_ID2 << 4))
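+/*
+ * Decoding sketch (illustrative): an instruction word is a CLZ when
+ * (raw & ARM_CLZ_MASK) == ARM_CLZ_TAG; the mask deliberately excludes
+ * the condition field. E.g. 0xE16F0F12 (clz r0, r2) passes the test.
+ */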
+
+
+
+
+typedef union {
+ ARMInstrBR br;
+ ARMInstrDPI dpi;
+ ARMInstrMRT mrt;
+ ARMInstrMul mul;
+ ARMInstrWXfer wxfer;
+ ARMInstrHXfer hxfer;
+ ARMInstrSwap swp;
+ ARMInstrCDP cdp;
+ ARMInstrCDT cdt;
+ ARMInstrCRT crt;
+ ARMInstrSWI swi;
+ ARMInstrMSR msr;
+ ARMInstrMRS mrs;
+ ARMInstrCLZ clz;
+
+ ARMInstrGeneric generic;
+ arminstr_t raw;
+} ARMInstr;
+
+/* ARMv6t2 */
+
+#define ARM_MOVW_REG_IMM_COND(p, rd, imm16, cond) ARM_EMIT(p, (((cond) << 28) | (3 << 24) | (0 << 20) | ((((guint32)(imm16)) >> 12) << 16) | ((rd) << 12) | (((guint32)(imm16)) & 0xfff)))
+#define ARM_MOVW_REG_IMM(p, rd, imm16) ARM_MOVW_REG_IMM_COND ((p), (rd), (imm16), ARMCOND_AL)
+
+#define ARM_MOVT_REG_IMM_COND(p, rd, imm16, cond) ARM_EMIT(p, (((cond) << 28) | (3 << 24) | (4 << 20) | ((((guint32)(imm16)) >> 12) << 16) | ((rd) << 12) | (((guint32)(imm16)) & 0xfff)))
+#define ARM_MOVT_REG_IMM(p, rd, imm16) ARM_MOVT_REG_IMM_COND ((p), (rd), (imm16), ARMCOND_AL)
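+
+/*
+ * Usage sketch (illustrative, assumes a code-buffer pointer `p` as used
+ * throughout this header): a full 32-bit constant can be materialized
+ * with a MOVW/MOVT pair, low half-word first:
+ *
+ *   ARM_MOVW_REG_IMM (p, ARMREG_R0, 0xBEEF);  // movw r0, #0xbeef
+ *   ARM_MOVT_REG_IMM (p, ARMREG_R0, 0xDEAD);  // movt r0, #0xdead -> r0 = 0xDEADBEEF
+ */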
+
+/* MCR */
+#define ARM_DEF_MCR_COND(coproc, opc1, rt, crn, crm, opc2, cond) \
+ ARM_DEF_COND ((cond)) | ((0xe << 24) | (((opc1) & 0x7) << 21) | (0 << 20) | (((crn) & 0xf) << 16) | (((rt) & 0xf) << 12) | (((coproc) & 0xf) << 8) | (((opc2) & 0x7) << 5) | (1 << 4) | (((crm) & 0xf) << 0))
+
+#define ARM_MCR_COND(p, coproc, opc1, rt, crn, crm, opc2, cond) \
+ ARM_EMIT(p, ARM_DEF_MCR_COND ((coproc), (opc1), (rt), (crn), (crm), (opc2), (cond)))
+
+#define ARM_MCR(p, coproc, opc1, rt, crn, crm, opc2) \
+ ARM_MCR_COND ((p), (coproc), (opc1), (rt), (crn), (crm), (opc2), ARMCOND_AL)
+
+/* ARMv7VE */
+#define ARM_SDIV_COND(p, rd, rn, rm, cond) ARM_EMIT (p, (((cond) << 28) | (0xe << 23) | (0x1 << 20) | ((rd) << 16) | (0xf << 12) | ((rm) << 8) | (0x0 << 5) | (0x1 << 4) | ((rn) << 0)))
+#define ARM_SDIV(p, rd, rn, rm) ARM_SDIV_COND ((p), (rd), (rn), (rm), ARMCOND_AL)
+
+#define ARM_UDIV_COND(p, rd, rn, rm, cond) ARM_EMIT (p, (((cond) << 28) | (0xe << 23) | (0x3 << 20) | ((rd) << 16) | (0xf << 12) | ((rm) << 8) | (0x0 << 5) | (0x1 << 4) | ((rn) << 0)))
+#define ARM_UDIV(p, rd, rn, rm) ARM_UDIV_COND ((p), (rd), (rn), (rm), ARMCOND_AL)
+
+/* ARMv7 */
+
+typedef enum {
+ ARM_DMB_SY = 0xf,
+} ArmDmbFlags;
+
+#define ARM_DMB(p, option) ARM_EMIT ((p), ((0xf << 28) | (0x57 << 20) | (0xf << 16) | (0xf << 12) | (0x0 << 8) | (0x5 << 4) | ((option) << 0)))
+
+#define ARM_LDREX_REG(p, rt, rn) ARM_EMIT ((p), ((ARMCOND_AL << 28) | (0xc << 21) | (0x1 << 20) | ((rn) << 16) | ((rt) << 12)) | (0xf << 8) | (0x9 << 4) | 0xf << 0)
+
+#define ARM_STREX_REG(p, rd, rt, rn) ARM_EMIT ((p), ((ARMCOND_AL << 28) | (0xc << 21) | (0x0 << 20) | ((rn) << 16) | ((rd) << 12)) | (0xf << 8) | (0x9 << 4) | ((rt) << 0))
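+
+/*
+ * Usage sketch (illustrative): the usual load-exclusive/store-exclusive
+ * retry loop, here an atomic increment of the word at [r1]; r2 receives
+ * the STREX status (0 on success):
+ *
+ *   ARM_LDREX_REG (p, ARMREG_R0, ARMREG_R1);            // ldrex r0, [r1]
+ *   ARM_ADD_REG_IMM8 (p, ARMREG_R0, ARMREG_R0, 1);      // add r0, r0, #1
+ *   ARM_STREX_REG (p, ARMREG_R2, ARMREG_R0, ARMREG_R1); // strex r2, r0, [r1]
+ *   // test r2 and branch back on failure, then ARM_DMB (p, ARM_DMB_SY)
+ */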
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* ARM_H */
+
diff --git a/lib/ffts/src/arch/arm/arm-dis.c b/lib/ffts/src/arch/arm/arm-dis.c
new file mode 100644
index 0000000..5074f26
--- /dev/null
+++ b/lib/ffts/src/arch/arm/arm-dis.c
@@ -0,0 +1,509 @@
+/*
+ * Copyright (c) 2002 Sergey Chaban <serge@wildwestsoftware.com>
+ */
+
+
+#include <stdarg.h>
+
+#include "arm-dis.h"
+#include "arm-codegen.h"
+
+
+static ARMDis* gdisasm = NULL;
+
+static int use_reg_alias = 1;
+
+static const char* cond[] = {
+ "eq", "ne", "cs", "cc", "mi", "pl", "vs", "vc",
+ "hi", "ls", "ge", "lt", "gt", "le", "", "nv"
+};
+
+static const char* ops[] = {
+ "and", "eor", "sub", "rsb", "add", "adc", "sbc", "rsc",
+ "tst", "teq", "cmp", "cmn", "orr", "mov", "bic", "mvn"
+};
+
+static const char* shift_types[] = {"lsl", "lsr", "asr", "ror"};
+
+static const char* mul_ops[] = {
+ "mul", "mla", "?", "?", "umull", "umlal", "smull", "smlal"
+};
+
+static const char* reg_alias[] = {
+ "a1", "a2", "a3", "a4",
+ "r4", "r5", "r6", "r7", "r8", "r9", "r10",
+ "fp", "ip", "sp", "lr", "pc"
+};
+
+static const char* msr_fld[] = {"f", "c", "x", "?", "s"};
+
+
+/* private function prototypes (to keep the compiler happy) */
+void chk_out(ARMDis* dis);
+void dump_reg(ARMDis* dis, int reg);
+void dump_creg(ARMDis* dis, int creg);
+void dump_reglist(ARMDis* dis, int reg_list);
+void init_gdisasm(void);
+
+void dump_br(ARMDis* dis, ARMInstr i);
+void dump_cdp(ARMDis* dis, ARMInstr i);
+void dump_cdt(ARMDis* dis, ARMInstr i);
+void dump_crt(ARMDis* dis, ARMInstr i);
+void dump_dpi(ARMDis* dis, ARMInstr i);
+void dump_hxfer(ARMDis* dis, ARMInstr i);
+void dump_mrs(ARMDis* dis, ARMInstr i);
+void dump_mrt(ARMDis* dis, ARMInstr i);
+void dump_msr(ARMDis* dis, ARMInstr i);
+void dump_mul(ARMDis* dis, ARMInstr i);
+void dump_swi(ARMDis* dis, ARMInstr i);
+void dump_swp(ARMDis* dis, ARMInstr i);
+void dump_wxfer(ARMDis* dis, ARMInstr i);
+void dump_clz(ARMDis* dis, ARMInstr i);
+
+
+/*
+void out(ARMDis* dis, const char* format, ...) {
+ va_list arglist;
+ va_start(arglist, format);
+ fprintf(dis->dis_out, format, arglist);
+ va_end(arglist);
+}
+*/
+
+
+void chk_out(ARMDis* dis) {
+ if (dis != NULL && dis->dis_out == NULL) dis->dis_out = stdout;
+}
+
+
+void armdis_set_output(ARMDis* dis, FILE* f) {
+ if (dis != NULL) {
+ dis->dis_out = f;
+ chk_out(dis);
+ }
+}
+
+FILE* armdis_get_output(ARMDis* dis) {
+ return (dis != NULL ? dis->dis_out : NULL);
+}
+
+
+
+
+void dump_reg(ARMDis* dis, int reg) {
+ reg &= 0xF;
+ if (!use_reg_alias || (reg > 3 && reg < 11)) {
+ fprintf(dis->dis_out, "r%d", reg);
+ } else {
+ fprintf(dis->dis_out, "%s", reg_alias[reg]);
+ }
+}
+
+void dump_creg(ARMDis* dis, int creg) {
+ if (dis != NULL) {
+ creg &= 0xF;
+ fprintf(dis->dis_out, "c%d", creg);
+ }
+}
+
+void dump_reglist(ARMDis* dis, int reg_list) {
+ int i = 0, j, n = 0;
+ int m1 = 1, m2, rn;
+ while (i < 16) {
+ if ((reg_list & m1) != 0) {
+ if (n != 0) fprintf(dis->dis_out, ", ");
+ n++;
+ dump_reg(dis, i);
+ for (j = i+1, rn = 0, m2 = m1<<1; j < 16; ++j, m2<<=1) {
+ if ((reg_list & m2) != 0) ++rn;
+ else break;
+ }
+ i+=rn;
+ if (rn > 1) {
+ fprintf(dis->dis_out, "-");
+ dump_reg(dis, i);
+ } else if (rn == 1) {
+ fprintf(dis->dis_out, ", ");
+ dump_reg(dis, i);
+ }
+ m1<<=(rn+1);
+ i++;
+ } else {
+ ++i;
+ m1<<=1;
+ }
+ }
+}
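+
+/* Example (illustrative): with register aliasing enabled, a reg_list of
+   0x800F collapses the run of bits 0-3 and prints "a1-a4, pc". */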
+
+
+void dump_br(ARMDis* dis, ARMInstr i) {
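+ /* branch target = instruction address + 8 (PC read-ahead) plus the
+    24-bit offset sign-extended and scaled by 4; (offset << 8) >> 6
+    does the sign-extension and the *4 in one step */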
+ fprintf(dis->dis_out, "b%s%s\t%x\t; %p -> %#x",
+ (i.br.link == 1) ? "l" : "",
+ cond[i.br.cond], i.br.offset, dis->pi, (int)dis->pi + 4*2 + ((int)(i.br.offset << 8) >> 6));
+}
+
+
+void dump_dpi(ARMDis* dis, ARMInstr i) {
+ fprintf(dis->dis_out, "%s%s", ops[i.dpi.all.opcode], cond[i.dpi.all.cond]);
+
+ if ((i.dpi.all.opcode < ARMOP_TST || i.dpi.all.opcode > ARMOP_CMN) && (i.dpi.all.s != 0)) {
+ fprintf(dis->dis_out, "s");
+ }
+
+ fprintf(dis->dis_out, "\t");
+
+ if ((i.dpi.all.opcode < ARMOP_TST) || (i.dpi.all.opcode > ARMOP_CMN)) {
+ /* for comparison operations Rd is ignored */
+ dump_reg(dis, i.dpi.all.rd);
+ fprintf(dis->dis_out, ", ");
+ }
+
+ if ((i.dpi.all.opcode != ARMOP_MOV) && (i.dpi.all.opcode != ARMOP_MVN)) {
+ /* for MOV/MVN Rn is ignored */
+ dump_reg(dis, i.dpi.all.rn);
+ fprintf(dis->dis_out, ", ");
+ }
+
+ if (i.dpi.all.type == 1) {
+ /* immediate */
+ if (i.dpi.op2_imm.rot != 0) {
+ fprintf(dis->dis_out, "#%d, %d\t; 0x%x", i.dpi.op2_imm.imm, i.dpi.op2_imm.rot << 1,
+ ARM_SCALE(i.dpi.op2_imm.imm, (i.dpi.op2_imm.rot << 1)) );
+ } else {
+ fprintf(dis->dis_out, "#%d\t; 0x%x", i.dpi.op2_imm.imm, i.dpi.op2_imm.imm);
+ }
+ } else {
+ /* reg-reg */
+ if (i.dpi.op2_reg.tag == 0) {
+ /* op2 is reg shift by imm */
+ dump_reg(dis, i.dpi.op2_reg_imm.r2.rm);
+ if (i.dpi.op2_reg_imm.imm.shift != 0) {
+ fprintf(dis->dis_out, " %s #%d", shift_types[i.dpi.op2_reg_imm.r2.type], i.dpi.op2_reg_imm.imm.shift);
+ }
+ } else {
+ /* op2 is reg shift by reg */
+ dump_reg(dis, i.dpi.op2_reg_reg.r2.rm);
+ fprintf(dis->dis_out, " %s ", shift_types[i.dpi.op2_reg_reg.r2.type]);
+ dump_reg(dis, i.dpi.op2_reg_reg.reg.rs);
+ }
+
+ }
+}
+
+void dump_wxfer(ARMDis* dis, ARMInstr i) {
+ fprintf(dis->dis_out, "%s%s%s%s\t",
+ (i.wxfer.all.ls == 0) ? "str" : "ldr",
+ cond[i.generic.cond],
+ (i.wxfer.all.b == 0) ? "" : "b",
+ (i.wxfer.all.ls != 0 && i.wxfer.all.wb != 0) ? "t" : "");
+ dump_reg(dis, i.wxfer.all.rd);
+ fprintf(dis->dis_out, ", [");
+ dump_reg(dis, i.wxfer.all.rn);
+ fprintf(dis->dis_out, "%s, ", (i.wxfer.all.p == 0) ? "]" : "");
+
+ if (i.wxfer.all.type == 0) { /* imm */
+ fprintf(dis->dis_out, "#%s%d", (i.wxfer.all.u == 0) ? "-" : "", i.wxfer.all.op2_imm);
+ } else {
+ dump_reg(dis, i.wxfer.op2_reg_imm.r2.rm);
+ if (i.wxfer.op2_reg_imm.imm.shift != 0) {
+ fprintf(dis->dis_out, " %s #%d", shift_types[i.wxfer.op2_reg_imm.r2.type], i.wxfer.op2_reg_imm.imm.shift);
+ }
+ }
+
+ if (i.wxfer.all.p != 0) {
+ /* close pre-index instr, also check for write-back */
+ fprintf(dis->dis_out, "]%s", (i.wxfer.all.wb != 0) ? "!" : "");
+ }
+}
+
+void dump_hxfer(ARMDis* dis, ARMInstr i) {
+ fprintf(dis->dis_out, "%s%s%s%s\t",
+ (i.hxfer.ls == 0) ? "str" : "ldr",
+ cond[i.generic.cond],
+ (i.hxfer.s != 0) ? "s" : "",
+ (i.hxfer.h != 0) ? "h" : "b");
+ dump_reg(dis, i.hxfer.rd);
+ fprintf(dis->dis_out, ", [");
+ dump_reg(dis, i.hxfer.rn);
+ fprintf(dis->dis_out, "%s, ", (i.hxfer.p == 0) ? "]" : "");
+
+ if (i.hxfer.type != 0) { /* imm */
+ fprintf(dis->dis_out, "#%s%d", (i.hxfer.u == 0) ? "-" : "", (i.hxfer.imm_hi << 4) | i.hxfer.rm);
+ } else {
+ dump_reg(dis, i.hxfer.rm);
+ }
+
+ if (i.hxfer.p != 0) {
+ /* close pre-index instr, also check for write-back */
+ fprintf(dis->dis_out, "]%s", (i.hxfer.wb != 0) ? "!" : "");
+ }
+}
+
+
+void dump_mrt(ARMDis* dis, ARMInstr i) {
+ fprintf(dis->dis_out, "%s%s%s%s\t", (i.mrt.ls == 0) ? "stm" : "ldm", cond[i.mrt.cond],
+ (i.mrt.u == 0) ? "d" : "i", (i.mrt.p == 0) ? "a" : "b");
+ dump_reg(dis, i.mrt.rn);
+ fprintf(dis->dis_out, "%s, {", (i.mrt.wb != 0) ? "!" : "");
+ dump_reglist(dis, i.mrt.reg_list);
+ fprintf(dis->dis_out, "}");
+}
+
+
+void dump_swp(ARMDis* dis, ARMInstr i) {
+ fprintf(dis->dis_out, "swp%s%s ", cond[i.swp.cond], (i.swp.b != 0) ? "b" : "");
+ dump_reg(dis, i.swp.rd);
+ fprintf(dis->dis_out, ", ");
+ dump_reg(dis, i.swp.rm);
+ fprintf(dis->dis_out, ", [");
+ dump_reg(dis, i.swp.rn);
+ fprintf(dis->dis_out, "]");
+}
+
+
+void dump_mul(ARMDis* dis, ARMInstr i) {
+ fprintf(dis->dis_out, "%s%s%s\t", mul_ops[i.mul.opcode], cond[i.mul.cond], (i.mul.s != 0) ? "s" : "");
+ switch (i.mul.opcode) {
+ case ARMOP_MUL:
+ dump_reg(dis, i.mul.rd);
+ fprintf(dis->dis_out, ", ");
+ dump_reg(dis, i.mul.rm);
+ fprintf(dis->dis_out, ", ");
+ dump_reg(dis, i.mul.rs);
+ break;
+ case ARMOP_MLA:
+ dump_reg(dis, i.mul.rd);
+ fprintf(dis->dis_out, ", ");
+ dump_reg(dis, i.mul.rm);
+ fprintf(dis->dis_out, ", ");
+ dump_reg(dis, i.mul.rs);
+ fprintf(dis->dis_out, ", ");
+ dump_reg(dis, i.mul.rn);
+ break;
+ case ARMOP_UMULL:
+ case ARMOP_UMLAL:
+ case ARMOP_SMULL:
+ case ARMOP_SMLAL:
+ dump_reg(dis, i.mul.rd);
+ fprintf(dis->dis_out, ", ");
+ dump_reg(dis, i.mul.rn);
+ fprintf(dis->dis_out, ", ");
+ dump_reg(dis, i.mul.rm);
+ fprintf(dis->dis_out, ", ");
+ dump_reg(dis, i.mul.rs);
+ break;
+ default:
+ fprintf(dis->dis_out, "DCD 0x%x\t; <unknown>", i.raw);
+ break;
+ }
+}
+
+
+void dump_cdp(ARMDis* dis, ARMInstr i) {
+ fprintf(dis->dis_out, "cdp%s\tp%d, %d, ", cond[i.generic.cond], i.cdp.cpn, i.cdp.op);
+ dump_creg(dis, i.cdp.crd);
+ fprintf(dis->dis_out, ", ");
+ dump_creg(dis, i.cdp.crn);
+ fprintf(dis->dis_out, ", ");
+ dump_creg(dis, i.cdp.crm);
+
+ if (i.cdp.op2 != 0) {
+ fprintf(dis->dis_out, ", %d", i.cdp.op2);
+ }
+}
+
+
+void dump_cdt(ARMDis* dis, ARMInstr i) {
+ fprintf(dis->dis_out, "%s%s%s\tp%d, ", (i.cdt.ls == 0) ? "stc" : "ldc",
+ cond[i.generic.cond], (i.cdt.n != 0) ? "l" : "", i.cdt.cpn);
+ dump_creg(dis, i.cdt.crd);
+ fprintf(dis->dis_out, ", ");
+ dump_reg(dis, i.cdt.rn);
+
+ if (i.cdt.p == 0) {
+ fprintf(dis->dis_out, "]");
+ }
+
+ if (i.cdt.offs != 0) {
+ fprintf(dis->dis_out, ", #%d", i.cdt.offs);
+ }
+
+ if (i.cdt.p != 0) {
+ fprintf(dis->dis_out, "]%s", (i.cdt.wb != 0) ? "!" : "");
+ }
+}
+
+
+void dump_crt(ARMDis* dis, ARMInstr i) {
+ fprintf(dis->dis_out, "%s%s\tp%d, %d, ", (i.crt.ls == 0) ? "mrc" : "mcr",
+ cond[i.generic.cond], i.crt.cpn, i.crt.op1);
+ dump_reg(dis, i.crt.rd);
+ fprintf(dis->dis_out, ", ");
+ dump_creg(dis, i.crt.crn);
+ fprintf(dis->dis_out, ", ");
+ dump_creg(dis, i.crt.crm);
+
+ if (i.crt.op2 != 0) {
+ fprintf(dis->dis_out, ", %d", i.crt.op2);
+ }
+}
+
+
+void dump_msr(ARMDis* dis, ARMInstr i) {
+ fprintf(dis->dis_out, "msr%s\t%spsr_, ", cond[i.generic.cond],
+ (i.msr.all.sel == 0) ? "s" : "c");
+ if (i.msr.all.type == 0) {
+ /* reg */
+ fprintf(dis->dis_out, "%s, ", msr_fld[i.msr.all.fld]);
+ dump_reg(dis, i.msr.all.rm);
+ } else {
+ /* imm */
+ fprintf(dis->dis_out, "f, #%d", i.msr.op2_imm.imm << i.msr.op2_imm.rot);
+ }
+}
+
+
+void dump_mrs(ARMDis* dis, ARMInstr i) {
+ fprintf(dis->dis_out, "mrs%s\t", cond[i.generic.cond]);
+ dump_reg(dis, i.mrs.rd);
+ fprintf(dis->dis_out, ", %spsr", (i.mrs.sel == 0) ? "s" : "c");
+}
+
+
+void dump_swi(ARMDis* dis, ARMInstr i) {
+ fprintf(dis->dis_out, "swi%s\t%d", cond[i.generic.cond], i.swi.num);
+}
+
+
+void dump_clz(ARMDis* dis, ARMInstr i) {
+ fprintf(dis->dis_out, "clz\t");
+ dump_reg(dis, i.clz.rd);
+ fprintf(dis->dis_out, ", ");
+ dump_reg(dis, i.clz.rm);
+ fprintf(dis->dis_out, "\n");
+}
+
+
+
+void armdis_decode(ARMDis* dis, void* p, int size) {
+ int i;
+ arminstr_t* pi = (arminstr_t*)p;
+ ARMInstr instr;
+
+ if (dis == NULL) return;
+
+ chk_out(dis);
+
+ size/=sizeof(arminstr_t);
+
+ for (i=0; i<size; ++i) {
+ fprintf(dis->dis_out, "%p:\t%08x\t", pi, *pi);
+ dis->pi = pi;
+ instr.raw = *pi++;
+
+ if ((instr.raw & ARM_BR_MASK) == ARM_BR_TAG) {
+ dump_br(dis, instr);
+ } else if ((instr.raw & ARM_SWP_MASK) == ARM_SWP_TAG) {
+ dump_swp(dis, instr);
+ } else if ((instr.raw & ARM_MUL_MASK) == ARM_MUL_TAG) {
+ dump_mul(dis, instr);
+ } else if ((instr.raw & ARM_CLZ_MASK) == ARM_CLZ_TAG) {
+ dump_clz(dis, instr);
+ } else if ((instr.raw & ARM_WXFER_MASK) == ARM_WXFER_TAG) {
+ dump_wxfer(dis, instr);
+ } else if ((instr.raw & ARM_HXFER_MASK) == ARM_HXFER_TAG) {
+ dump_hxfer(dis, instr);
+ } else if ((instr.raw & ARM_DPI_MASK) == ARM_DPI_TAG) {
+ dump_dpi(dis, instr);
+ } else if ((instr.raw & ARM_MRT_MASK) == ARM_MRT_TAG) {
+ dump_mrt(dis, instr);
+ } else if ((instr.raw & ARM_CDP_MASK) == ARM_CDP_TAG) {
+ dump_cdp(dis, instr);
+ } else if ((instr.raw & ARM_CDT_MASK) == ARM_CDT_TAG) {
+ dump_cdt(dis, instr);
+ } else if ((instr.raw & ARM_CRT_MASK) == ARM_CRT_TAG) {
+ dump_crt(dis, instr);
+ } else if ((instr.raw & ARM_MSR_MASK) == ARM_MSR_TAG) {
+ dump_msr(dis, instr);
+ } else if ((instr.raw & ARM_MRS_MASK) == ARM_MRS_TAG) {
+ dump_mrs(dis, instr);
+ } else if ((instr.raw & ARM_SWI_MASK) == ARM_SWI_TAG) {
+ dump_swi(dis, instr);
+ } else {
+ fprintf(dis->dis_out, "DCD 0x%x\t; <unknown>", instr.raw);
+ }
+
+ fprintf(dis->dis_out, "\n");
+ }
+}
+
+
+void armdis_open(ARMDis* dis, const char* dump_name) {
+ if (dis != NULL && dump_name != NULL) {
+ armdis_set_output(dis, fopen(dump_name, "w"));
+ }
+}
+
+
+void armdis_close(ARMDis* dis) {
+ if (dis != NULL && dis->dis_out != NULL && dis->dis_out != stdout && dis->dis_out != stderr) {
+ fclose(dis->dis_out);
+ dis->dis_out = NULL;
+ }
+}
+
+
+void armdis_dump(ARMDis* dis, const char* dump_name, void* p, int size) {
+ armdis_open(dis, dump_name);
+ armdis_decode(dis, p, size);
+ armdis_close(dis);
+}
+
+
+void armdis_init(ARMDis* dis) {
+ if (dis != NULL) {
+ /* set to stdout */
+ armdis_set_output(dis, NULL);
+ }
+}
+
+
+
+
+void init_gdisasm() {
+ if (gdisasm == NULL) {
+ gdisasm = (ARMDis*)malloc(sizeof(ARMDis));
+ armdis_init(gdisasm);
+ }
+}
+
+void _armdis_set_output(FILE* f) {
+ init_gdisasm();
+ armdis_set_output(gdisasm, f);
+}
+
+FILE* _armdis_get_output() {
+ init_gdisasm();
+ return armdis_get_output(gdisasm);
+}
+
+void _armdis_decode(void* p, int size) {
+ init_gdisasm();
+ armdis_decode(gdisasm, p, size);
+}
+
+void _armdis_open(const char* dump_name) {
+ init_gdisasm();
+ armdis_open(gdisasm, dump_name);
+}
+
+void _armdis_close() {
+ init_gdisasm();
+ armdis_close(gdisasm);
+}
+
+void _armdis_dump(const char* dump_name, void* p, int size) {
+ init_gdisasm();
+ armdis_dump(gdisasm, dump_name, p, size);
+}
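+
+/*
+ * Usage sketch (illustrative): the underscore-prefixed entry points keep a
+ * lazily allocated global ARMDis, so a freshly generated buffer can be
+ * dumped in one call:
+ *
+ *   arminstr_t buf[2], *q = buf;
+ *   ARM_MOV_REG_REG (q, ARMREG_R0, ARMREG_R1);
+ *   ARM_NOP (q);
+ *   _armdis_decode (buf, sizeof (buf));  // two lines to stdout
+ */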
+
diff --git a/lib/ffts/src/arch/arm/arm-dis.h b/lib/ffts/src/arch/arm/arm-dis.h
new file mode 100644
index 0000000..8019499
--- /dev/null
+++ b/lib/ffts/src/arch/arm/arm-dis.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2002 Sergey Chaban <serge@wildwestsoftware.com>
+ */
+
+#ifndef ARM_DIS
+#define ARM_DIS
+
+#include <stdlib.h>
+#include <stdio.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef struct _ARMDis {
+ FILE* dis_out;
+ void* pi;
+} ARMDis;
+
+
+void _armdis_set_output(FILE* f);
+FILE* _armdis_get_output(void);
+void _armdis_decode(void* p, int size);
+void _armdis_open(const char* dump_name);
+void _armdis_close(void);
+void _armdis_dump(const char* dump_name, void* p, int size);
+
+
+void armdis_init(ARMDis* dis);
+void armdis_set_output(ARMDis* dis, FILE* f);
+FILE* armdis_get_output(ARMDis* dis);
+void armdis_decode(ARMDis* dis, void* p, int size);
+void armdis_open(ARMDis* dis, const char* dump_name);
+void armdis_close(ARMDis* dis);
+void armdis_dump(ARMDis* dis, const char* dump_name, void* p, int size);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* ARM_DIS */
diff --git a/lib/ffts/src/arch/arm/arm-vfp-codegen.h b/lib/ffts/src/arch/arm/arm-vfp-codegen.h
new file mode 100644
index 0000000..8b56b00
--- /dev/null
+++ b/lib/ffts/src/arch/arm/arm-vfp-codegen.h
@@ -0,0 +1,247 @@
+//
+// Copyright 2011 Xamarin Inc
+//
+
+#ifndef __MONO_ARM_VFP_CODEGEN_H__
+#define __MONO_ARM_VFP_CODEGEN_H__
+
+#include "arm-codegen.h"
+
+enum {
+ /* VFP registers */
+ ARM_VFP_F0,
+ ARM_VFP_F1,
+ ARM_VFP_F2,
+ ARM_VFP_F3,
+ ARM_VFP_F4,
+ ARM_VFP_F5,
+ ARM_VFP_F6,
+ ARM_VFP_F7,
+ ARM_VFP_F8,
+ ARM_VFP_F9,
+ ARM_VFP_F10,
+ ARM_VFP_F11,
+ ARM_VFP_F12,
+ ARM_VFP_F13,
+ ARM_VFP_F14,
+ ARM_VFP_F15,
+ ARM_VFP_F16,
+ ARM_VFP_F17,
+ ARM_VFP_F18,
+ ARM_VFP_F19,
+ ARM_VFP_F20,
+ ARM_VFP_F21,
+ ARM_VFP_F22,
+ ARM_VFP_F23,
+ ARM_VFP_F24,
+ ARM_VFP_F25,
+ ARM_VFP_F26,
+ ARM_VFP_F27,
+ ARM_VFP_F28,
+ ARM_VFP_F29,
+ ARM_VFP_F30,
+ ARM_VFP_F31,
+
+ ARM_VFP_D0 = ARM_VFP_F0,
+ ARM_VFP_D1 = ARM_VFP_F2,
+ ARM_VFP_D2 = ARM_VFP_F4,
+ ARM_VFP_D3 = ARM_VFP_F6,
+ ARM_VFP_D4 = ARM_VFP_F8,
+ ARM_VFP_D5 = ARM_VFP_F10,
+ ARM_VFP_D6 = ARM_VFP_F12,
+ ARM_VFP_D7 = ARM_VFP_F14,
+ ARM_VFP_D8 = ARM_VFP_F16,
+ ARM_VFP_D9 = ARM_VFP_F18,
+ ARM_VFP_D10 = ARM_VFP_F20,
+ ARM_VFP_D11 = ARM_VFP_F22,
+ ARM_VFP_D12 = ARM_VFP_F24,
+ ARM_VFP_D13 = ARM_VFP_F26,
+ ARM_VFP_D14 = ARM_VFP_F28,
+ ARM_VFP_D15 = ARM_VFP_F30,
+
+ ARM_VFP_COPROC_SINGLE = 10,
+ ARM_VFP_COPROC_DOUBLE = 11,
+
+#define ARM_VFP_OP(p,q,r,s) (((p) << 23) | ((q) << 21) | ((r) << 20) | ((s) << 6))
+#define ARM_VFP_OP2(Fn,N) (ARM_VFP_OP (1,1,1,1) | ((Fn) << 16) | ((N) << 7))
+
+ ARM_VFP_MUL = ARM_VFP_OP (0,1,0,0),
+ ARM_VFP_NMUL = ARM_VFP_OP (0,1,0,1),
+ ARM_VFP_ADD = ARM_VFP_OP (0,1,1,0),
+ ARM_VFP_SUB = ARM_VFP_OP (0,1,1,1),
+ ARM_VFP_DIV = ARM_VFP_OP (1,0,0,0),
+
+ ARM_VFP_CPY = ARM_VFP_OP2 (0,0),
+ ARM_VFP_ABS = ARM_VFP_OP2 (0,1),
+ ARM_VFP_NEG = ARM_VFP_OP2 (1,0),
+ ARM_VFP_SQRT = ARM_VFP_OP2 (1,1),
+ ARM_VFP_CMP = ARM_VFP_OP2 (4,0),
+ ARM_VFP_CMPE = ARM_VFP_OP2 (4,1),
+ ARM_VFP_CMPZ = ARM_VFP_OP2 (5,0),
+ ARM_VFP_CMPEZ = ARM_VFP_OP2 (5,1),
+ ARM_VFP_CVT = ARM_VFP_OP2 (7,1),
+ ARM_VFP_UITO = ARM_VFP_OP2 (8,0),
+ ARM_VFP_SITO = ARM_VFP_OP2 (8,1),
+ ARM_VFP_TOUI = ARM_VFP_OP2 (12,0),
+ ARM_VFP_TOSI = ARM_VFP_OP2 (13,0),
+ ARM_VFP_TOUIZ = ARM_VFP_OP2 (12,1),
+ ARM_VFP_TOSIZ = ARM_VFP_OP2 (13,1),
+
+ ARM_VFP_SID = 0,
+ ARM_VFP_SCR = 1 << 1,
+ ARM_VFP_EXC = 8 << 1
+};
+
+#define ARM_DEF_VFP_DYADIC(cond,cp,op,Fd,Fn,Fm) \
+ (14 << 24) | \
+ ((cp) << 8) | \
+ (op) | \
+ (((Fd) >> 1) << 12) | \
+ (((Fd) & 1) << 22) | \
+ (((Fn) >> 1) << 16) | \
+ (((Fn) & 1) << 7) | \
+ (((Fm) >> 1) << 0) | \
+ (((Fm) & 1) << 5) | \
+ ARM_DEF_COND(cond)
+
+#define ARM_DEF_VFP_MONADIC(cond,cp,op,Fd,Fm) \
+ (14 << 24) | \
+ ((cp) << 8) | \
+ (op) | \
+ (((Fd) >> 1) << 12) | \
+ (((Fd) & 1) << 22) | \
+ (((Fm) >> 1) << 0) | \
+ (((Fm) & 1) << 5) | \
+ ARM_DEF_COND(cond)
+
+#define ARM_DEF_VFP_LSF(cond,cp,post,ls,wback,basereg,Fd,offset) \
+ ((offset) >= 0? (offset)>>2: -(offset)>>2) | \
+ (6 << 25) | \
+ ((cp) << 8) | \
+ (((Fd) >> 1) << 12) | \
+ (((Fd) & 1) << 22) | \
+ ((basereg) << 16) | \
+ ((ls) << 20) | \
+ ((wback) << 21) | \
+ (((offset) >= 0) << 23) | \
+ ((post) << 24) | \
+ ARM_DEF_COND(cond)
+
+#define ARM_DEF_VFP_CPT(cond,cp,op,L,Fn,Rd) \
+ (14 << 24) | \
+ (1 << 4) | \
+ ((cp) << 8) | \
+ ((op) << 21) | \
+ ((L) << 20) | \
+ ((Rd) << 12) | \
+ (((Fn) >> 1) << 16) | \
+ (((Fn) & 1) << 7) | \
+ ARM_DEF_COND(cond)
+
+/* FP load and stores */
+#define ARM_FLDS_COND(p,freg,base,offset,cond) \
+ ARM_EMIT((p), ARM_DEF_VFP_LSF((cond),ARM_VFP_COPROC_SINGLE,1,ARMOP_LDR,0,(base),(freg),(offset)))
+#define ARM_FLDS(p,freg,base,offset) \
+ ARM_FLDS_COND(p,freg,base,offset,ARMCOND_AL)
+
+#define ARM_FLDD_COND(p,freg,base,offset,cond) \
+ ARM_EMIT((p), ARM_DEF_VFP_LSF((cond),ARM_VFP_COPROC_DOUBLE,1,ARMOP_LDR,0,(base),(freg),(offset)))
+#define ARM_FLDD(p,freg,base,offset) \
+ ARM_FLDD_COND(p,freg,base,offset,ARMCOND_AL)
+
+#define ARM_FSTS_COND(p,freg,base,offset,cond) \
+ ARM_EMIT((p), ARM_DEF_VFP_LSF((cond),ARM_VFP_COPROC_SINGLE,1,ARMOP_STR,0,(base),(freg),(offset)))
+#define ARM_FSTS(p,freg,base,offset) \
+ ARM_FSTS_COND(p,freg,base,offset,ARMCOND_AL)
+
+#define ARM_FSTD_COND(p,freg,base,offset,cond) \
+ ARM_EMIT((p), ARM_DEF_VFP_LSF((cond),ARM_VFP_COPROC_DOUBLE,1,ARMOP_STR,0,(base),(freg),(offset)))
+#define ARM_FSTD(p,freg,base,offset) \
+ ARM_FSTD_COND(p,freg,base,offset,ARMCOND_AL)
+
+#define ARM_FLDMD_COND(p,first_reg,nregs,base,cond) \
+ ARM_EMIT((p), ARM_DEF_VFP_LSF((cond),ARM_VFP_COPROC_DOUBLE,0,ARMOP_LDR,0,(base),(first_reg),((nregs) * 2) << 2))
+
+#define ARM_FLDMD(p,first_reg,nregs,base) \
+ ARM_FLDMD_COND(p,first_reg,nregs,base,ARMCOND_AL)
+
+#define ARM_FSTMD_COND(p,first_reg,nregs,base,cond) \
+ ARM_EMIT((p), ARM_DEF_VFP_LSF((cond),ARM_VFP_COPROC_DOUBLE,0,ARMOP_STR,0,(base),(first_reg),((nregs) * 2) << 2))
+
+#define ARM_FSTMD(p,first_reg,nregs,base) \
+ ARM_FSTMD_COND(p,first_reg,nregs,base,ARMCOND_AL)
+
+#include <mono/arch/arm/arm_vfpmacros.h>
+
+/* coprocessor register transfer */
+#define ARM_FMSR(p,freg,reg) \
+ ARM_EMIT((p), ARM_DEF_VFP_CPT(ARMCOND_AL,ARM_VFP_COPROC_SINGLE,0,0,(freg),(reg)))
+#define ARM_FMRS(p,reg,freg) \
+ ARM_EMIT((p), ARM_DEF_VFP_CPT(ARMCOND_AL,ARM_VFP_COPROC_SINGLE,0,1,(freg),(reg)))
+
+#define ARM_FMDLR(p,freg,reg) \
+ ARM_EMIT((p), ARM_DEF_VFP_CPT(ARMCOND_AL,ARM_VFP_COPROC_DOUBLE,0,0,(freg),(reg)))
+#define ARM_FMRDL(p,reg,freg) \
+ ARM_EMIT((p), ARM_DEF_VFP_CPT(ARMCOND_AL,ARM_VFP_COPROC_DOUBLE,0,1,(freg),(reg)))
+#define ARM_FMDHR(p,freg,reg) \
+ ARM_EMIT((p), ARM_DEF_VFP_CPT(ARMCOND_AL,ARM_VFP_COPROC_DOUBLE,1,0,(freg),(reg)))
+#define ARM_FMRDH(p,reg,freg) \
+ ARM_EMIT((p), ARM_DEF_VFP_CPT(ARMCOND_AL,ARM_VFP_COPROC_DOUBLE,1,1,(freg),(reg)))
+
+#define ARM_FMXR(p,freg,reg) \
+ ARM_EMIT((p), ARM_DEF_VFP_CPT(ARMCOND_AL,ARM_VFP_COPROC_SINGLE,7,0,(freg),(reg)))
+#define ARM_FMRX(p,reg,fcreg) \
+ ARM_EMIT((p), ARM_DEF_VFP_CPT(ARMCOND_AL,ARM_VFP_COPROC_SINGLE,7,1,(fcreg),(reg)))
+
+#define ARM_FMSTAT(p) \
+ ARM_FMRX((p),ARMREG_R15,ARM_VFP_SCR)
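+
+/* Illustrative note: FMRX with R15 as the destination (what ARM_FMSTAT
+   emits) copies the VFP condition flags into the CPSR, so a VFP compare
+   (ARM_VFP_CMP/ARM_VFP_CMPE) followed by ARM_FMSTAT enables ordinary
+   conditional execution on the result. */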
+
+#define ARM_DEF_MCRR(cond,cp,rn,rd,Fm,M) \
+ ((Fm) << 0) | \
+ (1 << 4) | \
+ ((M) << 5) | \
+ ((cp) << 8) | \
+ ((rd) << 12) | \
+ ((rn) << 16) | \
+ ((2) << 21) | \
+ (12 << 24) | \
+ ARM_DEF_COND(cond)
+
+#define ARM_FMDRR(p,rd,rn,dm) \
+ ARM_EMIT((p), ARM_DEF_MCRR(ARMCOND_AL,ARM_VFP_COPROC_DOUBLE,(rn),(rd),(dm) >> 1, (dm) & 1))
+
+#define ARM_DEF_FMRRD(cond,cp,rn,rd,Dm,D) \
+ ((Dm) << 0) | \
+ (1 << 4) | \
+ ((cp) << 8) | \
+ ((rd) << 12) | \
+ ((rn) << 16) | \
+ ((0xc5) << 20) | \
+ ARM_DEF_COND(cond)
+
+#define ARM_FMRRD(p,rd,rn,dm) \
+ ARM_EMIT((p), ARM_DEF_FMRRD(ARMCOND_AL,ARM_VFP_COPROC_DOUBLE,(rn),(rd),(dm) >> 1, (dm) & 1))
+
+#define ARM_DEF_FUITOS(cond,Dd,D,Fm,M) ((cond) << 28) | ((0x1d) << 23) | ((D) << 22) | ((0x3) << 20) | ((8) << 16) | ((Dd) << 12) | ((0xa) << 8) | ((1) << 6) | ((M) << 5) | ((Fm) << 0)
+
+#define ARM_FUITOS(p,dreg,sreg) \
+ ARM_EMIT((p), ARM_DEF_FUITOS (ARMCOND_AL, (dreg) >> 1, (dreg) & 1, (sreg) >> 1, (sreg) & 1))
+
+#define ARM_DEF_FUITOD(cond,Dd,D,Fm,M) ((cond) << 28) | ((0x1d) << 23) | ((D) << 22) | ((0x3) << 20) | ((8) << 16) | ((Dd) << 12) | ((0xb) << 8) | ((1) << 6) | ((M) << 5) | ((Fm) << 0)
+
+#define ARM_FUITOD(p,dreg,sreg) \
+ ARM_EMIT((p), ARM_DEF_FUITOD (ARMCOND_AL, (dreg) >> 1, (dreg) & 1, (sreg) >> 1, (sreg) & 1))
+
+#define ARM_DEF_FSITOS(cond,Dd,D,Fm,M) ((cond) << 28) | ((0x1d) << 23) | ((D) << 22) | ((0x3) << 20) | ((8) << 16) | ((Dd) << 12) | ((0xa) << 8) | ((1) << 7) | ((1) << 6) | ((M) << 5) | ((Fm) << 0)
+
+#define ARM_FSITOS(p,dreg,sreg) \
+ ARM_EMIT((p), ARM_DEF_FSITOS (ARMCOND_AL, (dreg) >> 1, (dreg) & 1, (sreg) >> 1, (sreg) & 1))
+
+#define ARM_DEF_FSITOD(cond,Dd,D,Fm,M) ((cond) << 28) | ((0x1d) << 23) | ((D) << 22) | ((0x3) << 20) | ((8) << 16) | ((Dd) << 12) | ((0xb) << 8) | ((1) << 7) | ((1) << 6) | ((M) << 5) | ((Fm) << 0)
+
+#define ARM_FSITOD(p,dreg,sreg) \
+ ARM_EMIT((p), ARM_DEF_FSITOD (ARMCOND_AL, (dreg) >> 1, (dreg) & 1, (sreg) >> 1, (sreg) & 1))
+
+#endif /* __MONO_ARM_VFP_CODEGEN_H__ */
+
diff --git a/lib/ffts/src/arch/arm/arm-wmmx.h b/lib/ffts/src/arch/arm/arm-wmmx.h
new file mode 100644
index 0000000..427c4fc
--- /dev/null
+++ b/lib/ffts/src/arch/arm/arm-wmmx.h
@@ -0,0 +1,177 @@
+/*
+ * ARM CodeGen
+ * XScale WirelessMMX extensions
+ * Copyright 2002 Wild West Software
+ */
+
+#ifndef __WMMX_H__
+#define __WMMX_H__ 1
+
+#if 0
+#include <arm-codegen.h>
+#endif
+
+#if defined(ARM_IASM)
+# define WM_ASM(_expr) ARM_IASM(_expr)
+#else
+# define WM_ASM(_expr) __emit (_expr)
+#endif
+
+#if defined(ARM_EMIT)
+# define WM_EMIT(p, i) ARM_EMIT(p, i)
+#else
+# define WM_EMIT(p, i)
+#endif
+
+enum {
+ WM_CC_EQ = 0x0,
+ WM_CC_NE = 0x1,
+ WM_CC_CS = 0x2,
+ WM_CC_HS = WM_CC_CS,
+ WM_CC_CC = 0x3,
+ WM_CC_LO = WM_CC_CC,
+ WM_CC_MI = 0x4,
+ WM_CC_PL = 0x5,
+ WM_CC_VS = 0x6,
+ WM_CC_VC = 0x7,
+ WM_CC_HI = 0x8,
+ WM_CC_LS = 0x9,
+ WM_CC_GE = 0xA,
+ WM_CC_LT = 0xB,
+ WM_CC_GT = 0xC,
+ WM_CC_LE = 0xD,
+ WM_CC_AL = 0xE,
+ WM_CC_NV = 0xF,
+ WM_CC_SHIFT = 28
+};
+
+#if defined(ARM_DEF_COND)
+# define WM_DEF_CC(_cc) ARM_DEF_COND(_cc)
+#else
+# define WM_DEF_CC(_cc) ((_cc & 0xF) << WM_CC_SHIFT)
+#endif
+
+
+enum {
+ WM_R0 = 0x0,
+ WM_R1 = 0x1,
+ WM_R2 = 0x2,
+ WM_R3 = 0x3,
+ WM_R4 = 0x4,
+ WM_R5 = 0x5,
+ WM_R6 = 0x6,
+ WM_R7 = 0x7,
+ WM_R8 = 0x8,
+ WM_R9 = 0x9,
+ WM_R10 = 0xA,
+ WM_R11 = 0xB,
+ WM_R12 = 0xC,
+ WM_R13 = 0xD,
+ WM_R14 = 0xE,
+ WM_R15 = 0xF,
+
+ WM_wR0 = 0x0,
+ WM_wR1 = 0x1,
+ WM_wR2 = 0x2,
+ WM_wR3 = 0x3,
+ WM_wR4 = 0x4,
+ WM_wR5 = 0x5,
+ WM_wR6 = 0x6,
+ WM_wR7 = 0x7,
+ WM_wR8 = 0x8,
+ WM_wR9 = 0x9,
+ WM_wR10 = 0xA,
+ WM_wR11 = 0xB,
+ WM_wR12 = 0xC,
+ WM_wR13 = 0xD,
+ WM_wR14 = 0xE,
+ WM_wR15 = 0xF
+};
+
+
+/*
+ * Qualifiers:
+ * B - 8-bit (Byte) SIMD
+ * H - 16-bit (HalfWord) SIMD
+ * D - 64-bit (Double)
+ */
+enum {
+ WM_B = 0,
+ WM_H = 1,
+ WM_D = 2
+};
+
+/*
+ * B.2.3 Transfers From Coprocessor Register (MRC)
+ * Table B-5
+ */
+enum {
+ WM_TMRC_OP2 = 0,
+ WM_TMRC_CPNUM = 1,
+
+ WM_TMOVMSK_OP2 = 1,
+ WM_TMOVMSK_CPNUM = 0,
+
+ WM_TANDC_OP2 = 1,
+ WM_TANDC_CPNUM = 1,
+
+ WM_TORC_OP2 = 2,
+ WM_TORC_CPNUM = 1,
+
+ WM_TEXTRC_OP2 = 3,
+ WM_TEXTRC_CPNUM = 1,
+
+ WM_TEXTRM_OP2 = 3,
+ WM_TEXTRM_CPNUM = 0
+};
+
+
+/*
+ * TANDC<B,H,W>{Cond} R15
+ * Performs AND across the fields of the SIMD PSR register (wCASF) and sends the result
+ * to CPSR; can be performed after a Byte, Half-word or Word operation that sets the flags.
+ * NOTE: R15 is omitted from the macro declarations.
+ */
+#define DEF_WM_TNADC_CC(_q, _cc) WM_DEF_CC((_cc)) + ((_q) << 0x16) + 0xE13F130
+
+#define _WM_TNADC_CC(_q, _cc) WM_ASM(DEF_WM_TNADC_CC(_q, _cc))
+#define ARM_WM_TNADC_CC(_p, _q, _cc) WM_EMIT(_p, DEF_WM_TNADC_CC(_q, _cc))
+
+/* inline assembly */
+#define _WM_TNADC(_q) _WM_TNADC_CC((_q), WM_CC_AL)
+#define _WM_TNADCB() _WM_TNADC(WM_B)
+#define _WM_TNADCH() _WM_TNADC(WM_H)
+#define _WM_TNADCD() _WM_TNADC(WM_D)
+
+/* codegen */
+#define ARM_WM_TNADC(_p, _q) ARM_WM_TNADC_CC((_p), (_q), WM_CC_AL)
+#define ARM_WM_TNADCB(_p) ARM_WM_TNADC(_p, WM_B)
+#define ARM_WM_TNADCH(_p) ARM_WM_TNADC(_p, WM_H)
+#define ARM_WM_TNADCD(_p) ARM_WM_TNADC(_p, WM_D)
+
+
+/*
+ * TBCST<B,H,W>{Cond} wRd, Rn
+ * Broadcasts a value from the ARM Source reg (Rn) to every SIMD position
+ * in the WMMX Destination reg (wRd).
+ */
+#define DEF_WM_TBCST_CC(_q, _cc, _wrd, _rn) \
+ WM_DEF_CC((_cc)) + ((_q) << 6) + ((_wrd) << 16) + ((_rn) << 12) + 0xE200010
+
+#define _WM_TBCST_CC(_q, _cc, _wrd, _rn) WM_ASM(DEF_WM_TBCST_CC(_q, _cc, _wrd, _rn))
+#define ARM_WM_TBCST_CC(_p, _q, _cc, _wrd, _rn) WM_EMIT(_p, DEF_WM_TBCST_CC(_q, _cc, _wrd, _rn))
+
+/* inline */
+#define _WM_TBCST(_q, _wrd, _rn) _WM_TBCST_CC(_q, WM_CC_AL, _wrd, _rn)
+#define _WM_TBCSTB(_wrd, _rn) _WM_TBCST(WM_B, _wrd, _rn)
+#define _WM_TBCSTH(_wrd, _rn) _WM_TBCST(WM_H, _wrd, _rn)
+#define _WM_TBCSTD(_wrd, _rn) _WM_TBCST(WM_D, _wrd, _rn)
+
+/* codegen */
+#define ARM_WM_TBCST(_p, _q, _wrd, _rn) ARM_WM_TBCST_CC(_p, _q, WM_CC_AL, _wrd, _rn)
+#define ARM_WM_TBCSTB(_p, _wrd, _rn) ARM_WM_TBCST(_p, WM_B, _wrd, _rn)
+#define ARM_WM_TBCSTH(_p, _wrd, _rn) ARM_WM_TBCST(_p, WM_H, _wrd, _rn)
+#define ARM_WM_TBCSTD(_p, _wrd, _rn) ARM_WM_TBCST(_p, WM_D, _wrd, _rn)
+
+
+#endif /* __WMMX_H__ */
diff --git a/lib/ffts/src/arch/arm/cmp_macros.th b/lib/ffts/src/arch/arm/cmp_macros.th
new file mode 100644
index 0000000..cb2639d
--- /dev/null
+++ b/lib/ffts/src/arch/arm/cmp_macros.th
@@ -0,0 +1,56 @@
+/* PSR := <Op> Rn, (imm8 ROR 2*rot) */
+#define ARM_<Op>_REG_IMM_COND(p, rn, imm8, rot, cond) \
+ ARM_DPIOP_S_REG_IMM8ROT_COND(p, ARMOP_<Op>, 0, rn, imm8, rot, cond)
+#define ARM_<Op>_REG_IMM(p, rn, imm8, rot) \
+ ARM_<Op>_REG_IMM_COND(p, rn, imm8, rot, ARMCOND_AL)
+
+#ifndef ARM_NOIASM
+#define _<Op>_REG_IMM_COND(rn, imm8, rot, cond) \
+ ARM_IASM_DPIOP_S_REG_IMM8ROT_COND(ARMOP_<Op>, 0, rn, imm8, rot, cond)
+#define _<Op>_REG_IMM(rn, imm8, rot) \
+ _<Op>_REG_IMM_COND(rn, imm8, rot, ARMCOND_AL)
+#endif
+
+
+/* PSR := <Op> Rn, imm8 */
+#define ARM_<Op>_REG_IMM8_COND(p, rn, imm8, cond) \
+ ARM_<Op>_REG_IMM_COND(p, rn, imm8, 0, cond)
+#define ARM_<Op>_REG_IMM8(p, rn, imm8) \
+ ARM_<Op>_REG_IMM8_COND(p, rn, imm8, ARMCOND_AL)
+
+#ifndef ARM_NOIASM
+#define _<Op>_REG_IMM8_COND(rn, imm8, cond) \
+ _<Op>_REG_IMM_COND(rn, imm8, 0, cond)
+#define _<Op>_REG_IMM8(rn, imm8) \
+ _<Op>_REG_IMM8_COND(rn, imm8, ARMCOND_AL)
+#endif
+
+
+/* PSR := <Op> Rn, Rm */
+#define ARM_<Op>_REG_REG_COND(p, rn, rm, cond) \
+ ARM_DPIOP_S_REG_REG_COND(p, ARMOP_<Op>, 0, rn, rm, cond)
+#define ARM_<Op>_REG_REG(p, rn, rm) \
+ ARM_<Op>_REG_REG_COND(p, rn, rm, ARMCOND_AL)
+
+#ifndef ARM_NOIASM
+#define _<Op>_REG_REG_COND(rn, rm, cond) \
+ ARM_IASM_DPIOP_S_REG_REG_COND(ARMOP_<Op>, 0, rn, rm, cond)
+#define _<Op>_REG_REG(rn, rm) \
+ _<Op>_REG_REG_COND(rn, rm, ARMCOND_AL)
+#endif
+
+
+/* PSR := <Op> Rn, (Rm <shift_type> imm8) */
+#define ARM_<Op>_REG_IMMSHIFT_COND(p, rn, rm, shift_type, imm_shift, cond) \
+ ARM_DPIOP_S_REG_IMMSHIFT_COND(p, ARMOP_<Op>, 0, rn, rm, shift_type, imm_shift, cond)
+#define ARM_<Op>_REG_IMMSHIFT(p, rn, rm, shift_type, imm_shift) \
+ ARM_<Op>_REG_IMMSHIFT_COND(p, rn, rm, shift_type, imm_shift, ARMCOND_AL)
+
+#ifndef ARM_NOIASM
+#define _<Op>_REG_IMMSHIFT_COND(rn, rm, shift_type, imm_shift, cond) \
+ ARM_IASM_DPIOP_S_REG_IMMSHIFT_COND(ARMOP_<Op>, 0, rn, rm, shift_type, imm_shift, cond)
+#define _<Op>_REG_IMMSHIFT(rn, rm, shift_type, imm_shift) \
+ _<Op>_REG_IMMSHIFT_COND(rn, rm, shift_type, imm_shift, ARMCOND_AL)
+#endif
+
+
diff --git a/lib/ffts/src/arch/arm/dpi_macros.th b/lib/ffts/src/arch/arm/dpi_macros.th
new file mode 100644
index 0000000..be43d1f
--- /dev/null
+++ b/lib/ffts/src/arch/arm/dpi_macros.th
@@ -0,0 +1,112 @@
+/* -- <Op> -- */
+
+/* Rd := Rn <Op> (imm8 ROR rot) ; rot is power of 2 */
+#define ARM_<Op>_REG_IMM_COND(p, rd, rn, imm8, rot, cond) \
+ ARM_DPIOP_REG_IMM8ROT_COND(p, ARMOP_<Op>, rd, rn, imm8, rot, cond)
+#define ARM_<Op>_REG_IMM(p, rd, rn, imm8, rot) \
+ ARM_<Op>_REG_IMM_COND(p, rd, rn, imm8, rot, ARMCOND_AL)
+#define ARM_<Op>S_REG_IMM_COND(p, rd, rn, imm8, rot, cond) \
+ ARM_DPIOP_S_REG_IMM8ROT_COND(p, ARMOP_<Op>, rd, rn, imm8, rot, cond)
+#define ARM_<Op>S_REG_IMM(p, rd, rn, imm8, rot) \
+ ARM_<Op>S_REG_IMM_COND(p, rd, rn, imm8, rot, ARMCOND_AL)
+
+#ifndef ARM_NOIASM
+#define _<Op>_REG_IMM_COND(rd, rn, imm8, rot, cond) \
+ ARM_IASM_DPIOP_REG_IMM8ROT_COND(ARMOP_<Op>, rd, rn, imm8, rot, cond)
+#define _<Op>_REG_IMM(rd, rn, imm8, rot) \
+ _<Op>_REG_IMM_COND(rd, rn, imm8, rot, ARMCOND_AL)
+#define _<Op>S_REG_IMM_COND(rd, rn, imm8, rot, cond) \
+ ARM_IASM_DPIOP_S_REG_IMM8ROT_COND(ARMOP_<Op>, rd, rn, imm8, rot, cond)
+#define _<Op>S_REG_IMM(rd, rn, imm8, rot) \
+ _<Op>S_REG_IMM_COND(rd, rn, imm8, rot, ARMCOND_AL)
+#endif
+
+
+/* Rd := Rn <Op> imm8 */
+#define ARM_<Op>_REG_IMM8_COND(p, rd, rn, imm8, cond) \
+ ARM_<Op>_REG_IMM_COND(p, rd, rn, imm8, 0, cond)
+#define ARM_<Op>_REG_IMM8(p, rd, rn, imm8) \
+ ARM_<Op>_REG_IMM8_COND(p, rd, rn, imm8, ARMCOND_AL)
+#define ARM_<Op>S_REG_IMM8_COND(p, rd, rn, imm8, cond) \
+ ARM_<Op>S_REG_IMM_COND(p, rd, rn, imm8, 0, cond)
+#define ARM_<Op>S_REG_IMM8(p, rd, rn, imm8) \
+ ARM_<Op>S_REG_IMM8_COND(p, rd, rn, imm8, ARMCOND_AL)
+
+#ifndef ARM_NOIASM
+#define _<Op>_REG_IMM8_COND(rd, rn, imm8, cond) \
+ _<Op>_REG_IMM_COND(rd, rn, imm8, 0, cond)
+#define _<Op>_REG_IMM8(rd, rn, imm8) \
+ _<Op>_REG_IMM8_COND(rd, rn, imm8, ARMCOND_AL)
+#define _<Op>S_REG_IMM8_COND(rd, rn, imm8, cond) \
+ _<Op>S_REG_IMM_COND(rd, rn, imm8, 0, cond)
+#define _<Op>S_REG_IMM8(rd, rn, imm8) \
+ _<Op>S_REG_IMM8_COND(rd, rn, imm8, ARMCOND_AL)
+#endif
+
+
+/* Rd := Rn <Op> Rm */
+#define ARM_<Op>_REG_REG_COND(p, rd, rn, rm, cond) \
+ ARM_DPIOP_REG_REG_COND(p, ARMOP_<Op>, rd, rn, rm, cond)
+#define ARM_<Op>_REG_REG(p, rd, rn, rm) \
+ ARM_<Op>_REG_REG_COND(p, rd, rn, rm, ARMCOND_AL)
+#define ARM_<Op>S_REG_REG_COND(p, rd, rn, rm, cond) \
+ ARM_DPIOP_S_REG_REG_COND(p, ARMOP_<Op>, rd, rn, rm, cond)
+#define ARM_<Op>S_REG_REG(p, rd, rn, rm) \
+ ARM_<Op>S_REG_REG_COND(p, rd, rn, rm, ARMCOND_AL)
+
+#ifndef ARM_NOIASM
+#define _<Op>_REG_REG_COND(rd, rn, rm, cond) \
+ ARM_IASM_DPIOP_REG_REG_COND(ARMOP_<Op>, rd, rn, rm, cond)
+#define _<Op>_REG_REG(rd, rn, rm) \
+ _<Op>_REG_REG_COND(rd, rn, rm, ARMCOND_AL)
+#define _<Op>S_REG_REG_COND(rd, rn, rm, cond) \
+ ARM_IASM_DPIOP_S_REG_REG_COND(ARMOP_<Op>, rd, rn, rm, cond)
+#define _<Op>S_REG_REG(rd, rn, rm) \
+ _<Op>S_REG_REG_COND(rd, rn, rm, ARMCOND_AL)
+#endif
+
+
+/* Rd := Rn <Op> (Rm <shift_type> imm_shift) */
+#define ARM_<Op>_REG_IMMSHIFT_COND(p, rd, rn, rm, shift_type, imm_shift, cond) \
+ ARM_DPIOP_REG_IMMSHIFT_COND(p, ARMOP_<Op>, rd, rn, rm, shift_type, imm_shift, cond)
+#define ARM_<Op>_REG_IMMSHIFT(p, rd, rn, rm, shift_type, imm_shift) \
+ ARM_<Op>_REG_IMMSHIFT_COND(p, rd, rn, rm, shift_type, imm_shift, ARMCOND_AL)
+#define ARM_<Op>S_REG_IMMSHIFT_COND(p, rd, rn, rm, shift_type, imm_shift, cond) \
+ ARM_DPIOP_S_REG_IMMSHIFT_COND(p, ARMOP_<Op>, rd, rn, rm, shift_type, imm_shift, cond)
+#define ARM_<Op>S_REG_IMMSHIFT(p, rd, rn, rm, shift_type, imm_shift) \
+ ARM_<Op>S_REG_IMMSHIFT_COND(p, rd, rn, rm, shift_type, imm_shift, ARMCOND_AL)
+
+#ifndef ARM_NOIASM
+#define _<Op>_REG_IMMSHIFT_COND(rd, rn, rm, shift_type, imm_shift, cond) \
+ ARM_IASM_DPIOP_REG_IMMSHIFT_COND(ARMOP_<Op>, rd, rn, rm, shift_type, imm_shift, cond)
+#define _<Op>_REG_IMMSHIFT(rd, rn, rm, shift_type, imm_shift) \
+ _<Op>_REG_IMMSHIFT_COND(rd, rn, rm, shift_type, imm_shift, ARMCOND_AL)
+#define _<Op>S_REG_IMMSHIFT_COND(rd, rn, rm, shift_type, imm_shift, cond) \
+ ARM_IASM_DPIOP_S_REG_IMMSHIFT_COND(ARMOP_<Op>, rd, rn, rm, shift_type, imm_shift, cond)
+#define _<Op>S_REG_IMMSHIFT(rd, rn, rm, shift_type, imm_shift) \
+ _<Op>S_REG_IMMSHIFT_COND(rd, rn, rm, shift_type, imm_shift, ARMCOND_AL)
+#endif
+
+
+/* Rd := Rn <Op> (Rm <shift_type> Rs) */
+#define ARM_<Op>_REG_REGSHIFT_COND(p, rd, rn, rm, shift_type, rs, cond) \
+ ARM_DPIOP_REG_REGSHIFT_COND(p, ARMOP_<Op>, rd, rn, rm, shift_type, rs, cond)
+#define ARM_<Op>_REG_REGSHIFT(p, rd, rn, rm, shift_type, rs) \
+ ARM_<Op>_REG_REGSHIFT_COND(p, rd, rn, rm, shift_type, rs, ARMCOND_AL)
+#define ARM_<Op>S_REG_REGSHIFT_COND(p, rd, rn, rm, shift_type, rs, cond) \
+ ARM_DPIOP_S_REG_REGSHIFT_COND(p, ARMOP_<Op>, rd, rn, rm, shift_type, rs, cond)
+#define ARM_<Op>S_REG_REGSHIFT(p, rd, rn, rm, shift_type, rs) \
+ ARM_<Op>S_REG_REGSHIFT_COND(p, rd, rn, rm, shift_type, rs, ARMCOND_AL)
+
+#ifndef ARM_NOIASM
+#define _<Op>_REG_REGSHIFT_COND(rd, rn, rm, shift_type, rs, cond) \
+ ARM_IASM_DPIOP_REG_REGSHIFT_COND(ARMOP_<Op>, rd, rn, rm, shift_type, rs, cond)
+#define _<Op>_REG_REGSHIFT(rd, rn, rm, shift_type, rs) \
+ _<Op>_REG_REGSHIFT_COND(rd, rn, rm, shift_type, rs, ARMCOND_AL)
+#define _<Op>S_REG_REGSHIFT_COND(rd, rn, rm, shift_type, rs, cond) \
+ ARM_IASM_DPIOP_S_REG_REGSHIFT_COND(ARMOP_<Op>, rd, rn, rm, shift_type, rs, cond)
+#define _<Op>S_REG_REGSHIFT(rd, rn, rm, shift_type, rs) \
+ _<Op>S_REG_REGSHIFT_COND(rd, rn, rm, shift_type, rs, ARMCOND_AL)
+#endif
+
+
diff --git a/lib/ffts/src/arch/arm/dpiops.sh b/lib/ffts/src/arch/arm/dpiops.sh
new file mode 100755
index 0000000..d3b93ff
--- /dev/null
+++ b/lib/ffts/src/arch/arm/dpiops.sh
@@ -0,0 +1,30 @@
+#!/bin/sh
+
+OPCODES="AND EOR SUB RSB ADD ADC SBC RSC ORR BIC"
+CMP_OPCODES="TST TEQ CMP CMN"
+MOV_OPCODES="MOV MVN"
+
+# $1: opcode list
+# $2: template
+gen() {
+ for i in $1; do
+ sed "s/<Op>/$i/g" $2.th
+ done
+}
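+
+# Example (illustrative): `gen "CMP" cmp_macros` rewrites every <Op> in
+# cmp_macros.th to CMP, yielding ARM_CMP_REG_IMM_COND() and friends.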
+
+
+
+echo -e "/* Macros for DPI ops, auto-generated from template */\n"
+
+echo -e "\n/* mov/mvn */\n"
+gen "$MOV_OPCODES" mov_macros
+
+echo -e "\n/* DPIs, arithmetic and logical */\n"
+gen "$OPCODES" dpi_macros
+
+echo -e "\n\n"
+
+echo -e "\n/* DPIs, comparison */\n"
+gen "$CMP_OPCODES" cmp_macros
+
+echo -e "\n/* end generated */\n"
diff --git a/lib/ffts/src/arch/arm/mov_macros.th b/lib/ffts/src/arch/arm/mov_macros.th
new file mode 100644
index 0000000..6bac290
--- /dev/null
+++ b/lib/ffts/src/arch/arm/mov_macros.th
@@ -0,0 +1,121 @@
+/* Rd := imm8 ROR rot */
+#define ARM_<Op>_REG_IMM_COND(p, reg, imm8, rot, cond) \
+ ARM_DPIOP_REG_IMM8ROT_COND(p, ARMOP_<Op>, reg, 0, imm8, rot, cond)
+#define ARM_<Op>_REG_IMM(p, reg, imm8, rot) \
+ ARM_<Op>_REG_IMM_COND(p, reg, imm8, rot, ARMCOND_AL)
+/* S */
+#define ARM_<Op>S_REG_IMM_COND(p, reg, imm8, rot, cond) \
+ ARM_DPIOP_S_REG_IMM8ROT_COND(p, ARMOP_<Op>, reg, 0, imm8, rot, cond)
+#define ARM_<Op>S_REG_IMM(p, reg, imm8, rot) \
+ ARM_<Op>S_REG_IMM_COND(p, reg, imm8, rot, ARMCOND_AL)
+
+#ifndef ARM_NOIASM
+#define _<Op>_REG_IMM_COND(reg, imm8, rot, cond) \
+ ARM_IASM_DPIOP_REG_IMM8ROT_COND(ARMOP_<Op>, reg, 0, imm8, rot, cond)
+#define _<Op>_REG_IMM(reg, imm8, rot) \
+ _<Op>_REG_IMM_COND(reg, imm8, rot, ARMCOND_AL)
+/* S */
+#define _<Op>S_REG_IMM_COND(reg, imm8, rot, cond) \
+ ARM_IASM_DPIOP_S_REG_IMM8ROT_COND(ARMOP_<Op>, reg, 0, imm8, rot, cond)
+#define _<Op>S_REG_IMM(reg, imm8, rot) \
+ _<Op>S_REG_IMM_COND(reg, imm8, rot, ARMCOND_AL)
+#endif
+
+
+/* Rd := imm8 */
+#define ARM_<Op>_REG_IMM8_COND(p, reg, imm8, cond) \
+ ARM_DPIOP_REG_IMM8ROT_COND(p, ARMOP_<Op>, reg, 0, imm8, 0, cond)
+#define ARM_<Op>_REG_IMM8(p, reg, imm8) \
+ ARM_<Op>_REG_IMM8_COND(p, reg, imm8, ARMCOND_AL)
+/* S */
+#define ARM_<Op>S_REG_IMM8_COND(p, reg, imm8, cond) \
+ ARM_DPIOP_S_REG_IMM8ROT_COND(p, ARMOP_<Op>, reg, 0, imm8, 0, cond)
+#define ARM_<Op>S_REG_IMM8(p, reg, imm8) \
+ ARM_<Op>S_REG_IMM8_COND(p, reg, imm8, ARMCOND_AL)
+
+#ifndef ARM_NOIASM
+#define _<Op>_REG_IMM8_COND(reg, imm8, cond) \
+ ARM_IASM_DPIOP_REG_IMM8ROT_COND(ARMOP_<Op>, reg, 0, imm8, 0, cond)
+#define _<Op>_REG_IMM8(reg, imm8) \
+ _<Op>_REG_IMM8_COND(reg, imm8, ARMCOND_AL)
+/* S */
+#define _<Op>S_REG_IMM8_COND(reg, imm8, cond) \
+ ARM_IASM_DPIOP_S_REG_IMM8ROT_COND(ARMOP_<Op>, reg, 0, imm8, 0, cond)
+#define _<Op>S_REG_IMM8(reg, imm8) \
+ _<Op>S_REG_IMM8_COND(reg, imm8, ARMCOND_AL)
+#endif
+
+
+/* Rd := Rm */
+#define ARM_<Op>_REG_REG_COND(p, rd, rm, cond) \
+ ARM_DPIOP_REG_REG_COND(p, ARMOP_<Op>, rd, 0, rm, cond)
+#define ARM_<Op>_REG_REG(p, rd, rm) \
+ ARM_<Op>_REG_REG_COND(p, rd, rm, ARMCOND_AL)
+/* S */
+#define ARM_<Op>S_REG_REG_COND(p, rd, rm, cond) \
+ ARM_DPIOP_S_REG_REG_COND(p, ARMOP_<Op>, rd, 0, rm, cond)
+#define ARM_<Op>S_REG_REG(p, rd, rm) \
+ ARM_<Op>S_REG_REG_COND(p, rd, rm, ARMCOND_AL)
+
+#ifndef ARM_NOIASM
+#define _<Op>_REG_REG_COND(rd, rm, cond) \
+ ARM_IASM_DPIOP_REG_REG_COND(ARMOP_<Op>, rd, 0, rm, cond)
+#define _<Op>_REG_REG(rd, rm) \
+ _<Op>_REG_REG_COND(rd, rm, ARMCOND_AL)
+/* S */
+#define _<Op>S_REG_REG_COND(rd, rm, cond) \
+ ARM_IASM_DPIOP_S_REG_REG_COND(ARMOP_<Op>, rd, 0, rm, cond)
+#define _<Op>S_REG_REG(rd, rm) \
+ _<Op>S_REG_REG_COND(rd, rm, ARMCOND_AL)
+#endif
+
+
+/* Rd := Rm <shift_type> imm_shift */
+#define ARM_<Op>_REG_IMMSHIFT_COND(p, rd, rm, shift_type, imm_shift, cond) \
+ ARM_DPIOP_REG_IMMSHIFT_COND(p, ARMOP_<Op>, rd, 0, rm, shift_type, imm_shift, cond)
+#define ARM_<Op>_REG_IMMSHIFT(p, rd, rm, shift_type, imm_shift) \
+ ARM_<Op>_REG_IMMSHIFT_COND(p, rd, rm, shift_type, imm_shift, ARMCOND_AL)
+/* S */
+#define ARM_<Op>S_REG_IMMSHIFT_COND(p, rd, rm, shift_type, imm_shift, cond) \
+ ARM_DPIOP_S_REG_IMMSHIFT_COND(p, ARMOP_<Op>, rd, 0, rm, shift_type, imm_shift, cond)
+#define ARM_<Op>S_REG_IMMSHIFT(p, rd, rm, shift_type, imm_shift) \
+ ARM_<Op>S_REG_IMMSHIFT_COND(p, rd, rm, shift_type, imm_shift, ARMCOND_AL)
+
+#ifndef ARM_NOIASM
+#define _<Op>_REG_IMMSHIFT_COND(rd, rm, shift_type, imm_shift, cond) \
+ ARM_IASM_DPIOP_REG_IMMSHIFT_COND(ARMOP_<Op>, rd, 0, rm, shift_type, imm_shift, cond)
+#define _<Op>_REG_IMMSHIFT(rd, rm, shift_type, imm_shift) \
+ _<Op>_REG_IMMSHIFT_COND(rd, rm, shift_type, imm_shift, ARMCOND_AL)
+/* S */
+#define _<Op>S_REG_IMMSHIFT_COND(rd, rm, shift_type, imm_shift, cond) \
+ ARM_IASM_DPIOP_S_REG_IMMSHIFT_COND(ARMOP_<Op>, rd, 0, rm, shift_type, imm_shift, cond)
+#define _<Op>S_REG_IMMSHIFT(rd, rm, shift_type, imm_shift) \
+ _<Op>S_REG_IMMSHIFT_COND(rd, rm, shift_type, imm_shift, ARMCOND_AL)
+#endif
+
+
+
+/* Rd := (Rm <shift_type> Rs) */
+#define ARM_<Op>_REG_REGSHIFT_COND(p, rd, rm, shift_type, rs, cond) \
+ ARM_DPIOP_REG_REGSHIFT_COND(p, ARMOP_<Op>, rd, 0, rm, shift_type, rs, cond)
+#define ARM_<Op>_REG_REGSHIFT(p, rd, rm, shift_type, rs) \
+ ARM_<Op>_REG_REGSHIFT_COND(p, rd, rm, shift_type, rs, ARMCOND_AL)
+/* S */
+#define ARM_<Op>S_REG_REGSHIFT_COND(p, rd, rm, shift_type, rs, cond) \
+ ARM_DPIOP_S_REG_REGSHIFT_COND(p, ARMOP_<Op>, rd, 0, rm, shift_type, rs, cond)
+#define ARM_<Op>S_REG_REGSHIFT(p, rd, rm, shift_type, rs) \
+ ARM_<Op>S_REG_REGSHIFT_COND(p, rd, rm, shift_type, rs, ARMCOND_AL)
+
+#ifndef ARM_NOIASM
+#define _<Op>_REG_REGSHIFT_COND(rd, rm, shift_type, rs, cond) \
+ ARM_IASM_DPIOP_REG_REGSHIFT_COND(ARMOP_<Op>, rd, 0, rm, shift_type, rs, cond)
+#define _<Op>_REG_REGSHIFT(rd, rm, shift_type, rs) \
+ _<Op>_REG_REGSHIFT_COND(rd, rm, shift_type, rs, ARMCOND_AL)
+/* S */
+#define _<Op>S_REG_REGSHIFT_COND(rd, rm, shift_type, rs, cond) \
+ ARM_IASM_DPIOP_S_REG_REGSHIFT_COND(ARMOP_<Op>, rd, 0, rm, shift_type, rs, cond)
+#define _<Op>S_REG_REGSHIFT(rd, rm, shift_type, rs) \
+ _<Op>S_REG_REGSHIFT_COND(rd, rm, shift_type, rs, ARMCOND_AL)
+#endif
+
+
diff --git a/lib/ffts/src/arch/arm/tramp.c b/lib/ffts/src/arch/arm/tramp.c
new file mode 100644
index 0000000..f736c7a
--- /dev/null
+++ b/lib/ffts/src/arch/arm/tramp.c
@@ -0,0 +1,710 @@
+/*
+ * Create trampolines to invoke arbitrary functions.
+ * Copyright (c) 2002 Sergey Chaban <serge@wildwestsoftware.com>
+ *
+ * Contributions by Malte Hildingson
+ */
+
+#include "arm-codegen.h"
+#include "arm-dis.h"
+
+#if defined(_WIN32_WCE) || defined (UNDER_CE)
+# include <windows.h>
+#else
+#include <unistd.h>
+#include <sys/mman.h>
+#endif
+
+#if !defined(PLATFORM_MACOSX)
+#include <errno.h>
+
+#include "mono/metadata/class.h"
+#include "mono/metadata/tabledefs.h"
+#include "mono/interpreter/interp.h"
+#include "mono/metadata/appdomain.h"
+
+
+#if 0
+# define ARM_DUMP_DISASM 1
+#endif
+
+/* prototypes for private functions (to avoid compiler warnings) */
+void flush_icache (void);
+void* alloc_code_buff (int num_instr);
+
+
+
+/*
+ * The resulting function takes the form:
+ * void func (void (*callme)(), void *retval, void *this_obj, stackval *arguments);
+ * NOTE: all args passed in ARM registers (A1-A4),
+ * then copied to R4-R7 (see definitions below).
+ */
+
+#define REG_FUNC_ADDR ARMREG_R4
+#define REG_RETVAL ARMREG_R5
+#define REG_THIS ARMREG_R6
+#define REG_ARGP ARMREG_R7
+
+
+#define ARG_SIZE sizeof(stackval)
+
+
+
+
+void flush_icache ()
+{
+#if defined(_WIN32)
+ FlushInstructionCache(GetCurrentProcess(), NULL, 0);
+#else
+# if 0
+ asm ("mov r0, r0");
+ asm ("mov r0, #0");
+ asm ("mcr p15, 0, r0, c7, c7, 0");
+# else
+ /* TODO: use (movnv pc, rx) method */
+# endif
+#endif
+}
+
+
+void* alloc_code_buff (int num_instr)
+{
+ void* code_buff;
+ int code_size = num_instr * sizeof(arminstr_t);
+
+#if defined(_WIN32) || defined(UNDER_CE)
+ int old_prot = 0;
+
+ code_buff = malloc(code_size);
+ VirtualProtect(code_buff, code_size, PAGE_EXECUTE_READWRITE, &old_prot);
+#else
+ int page_size = sysconf(_SC_PAGESIZE);
+ int new_code_size;
+
+ new_code_size = code_size + page_size - 1;
+ code_buff = malloc(new_code_size);
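+ /* round up to the next page boundary: mprotect() needs a page-aligned
+    address, and the page_size - 1 bytes of slack allocated above keep
+    the aligned buffer inside the allocation */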
+ code_buff = (void *) (((int) code_buff + page_size - 1) & ~(page_size - 1));
+
+ if (mprotect(code_buff, code_size, PROT_READ|PROT_WRITE|PROT_EXEC) != 0) {
+ g_critical (G_GNUC_PRETTY_FUNCTION
+ ": mprotect error: %s", g_strerror (errno));
+ }
+#endif
+
+ return code_buff;
+}
+
+
+/*
+ * Refer to ARM Procedure Call Standard (APCS) for more info.
+ */
+MonoPIFunc mono_arch_create_trampoline (MonoMethodSignature *sig, gboolean string_ctor)
+{
+ MonoType* param;
+ MonoPIFunc code_buff;
+ arminstr_t* p;
+ guint32 code_size, stack_size;
+ guint32 simple_type;
+ int i, hasthis, aregs, regc, stack_offs;
+ int this_loaded;
+ guchar reg_alloc [ARM_NUM_ARG_REGS];
+
+ /* pessimistic estimation for prologue/epilogue size */
+ code_size = 16 + 16;
+ /* push/pop work regs */
+ code_size += 2;
+ /* call */
+ code_size += 2;
+ /* handle retval */
+ code_size += 2;
+
+ stack_size = 0;
+ hasthis = sig->hasthis ? 1 : 0;
+
+ aregs = ARM_NUM_ARG_REGS - hasthis;
+
+ for (i = 0, regc = aregs; i < sig->param_count; ++i) {
+ param = sig->params [i];
+
+ /* default: argument not assigned to a register yet */
+ if (i < ARM_NUM_ARG_REGS) reg_alloc [i] = 0;
+
+ if (param->byref) {
+ if (regc > 0) {
+ code_size += 1;
+ reg_alloc [i] = regc;
+ --regc;
+ } else {
+ code_size += 2;
+ stack_size += sizeof(gpointer);
+ }
+ } else {
+ simple_type = param->type;
+enum_calc_size:
+ switch (simple_type) {
+ case MONO_TYPE_BOOLEAN:
+ case MONO_TYPE_CHAR:
+ case MONO_TYPE_I1:
+ case MONO_TYPE_U1:
+ case MONO_TYPE_I2:
+ case MONO_TYPE_U2:
+ case MONO_TYPE_I4:
+ case MONO_TYPE_U4:
+ case MONO_TYPE_I:
+ case MONO_TYPE_U:
+ case MONO_TYPE_PTR:
+ case MONO_TYPE_R4:
+ case MONO_TYPE_SZARRAY:
+ case MONO_TYPE_CLASS:
+ case MONO_TYPE_OBJECT:
+ case MONO_TYPE_STRING:
+ if (regc > 0) {
+ /* register arg */
+ code_size += 1;
+ reg_alloc [i] = regc;
+ --regc;
+ } else {
+ /* stack arg */
+ code_size += 2;
+ stack_size += 4;
+ }
+ break;
+ case MONO_TYPE_I8:
+ case MONO_TYPE_U8:
+ case MONO_TYPE_R8:
+ /* 64-bit args may be split between registers and the stack */
+ if (regc > 1) {
+ /* fits into registers, two LDRs */
+ code_size += 2;
+ reg_alloc [i] = regc;
+ regc -= 2;
+ } else if (regc > 0) {
+ /* first half fits into register, one LDR */
+ code_size += 1;
+ reg_alloc [i] = regc;
+ --regc;
+ /* the rest on the stack, LDR/STR */
+ code_size += 2;
+ stack_size += 4;
+ } else {
+ /* stack arg, 4 instrs - 2x(LDR/STR) */
+ code_size += 4;
+ stack_size += 2 * 4;
+ }
+ break;
+ case MONO_TYPE_VALUETYPE:
+ if (param->data.klass->enumtype) {
+ simple_type = param->data.klass->enum_basetype->type;
+ goto enum_calc_size;
+ }
+
+ if (mono_class_value_size(param->data.klass, NULL) != 4) {
+ g_error("can only marshal enums, not generic structures (size: %d)", mono_class_value_size(param->data.klass, NULL));
+ }
+ if (regc > 0) {
+ /* register arg */
+ code_size += 1;
+ reg_alloc [i] = regc;
+ --regc;
+ } else {
+ /* stack arg */
+ code_size += 2;
+ stack_size += 4;
+ }
+ break;
+ default :
+ break;
+ }
+ }
+ }
+
+ code_buff = (MonoPIFunc)alloc_code_buff(code_size);
+ p = (arminstr_t*)code_buff;
+
+ /* prologue */
+ p = arm_emit_lean_prologue(p, stack_size,
+ /* save workset (r4-r7) */
+ (1 << ARMREG_R4) | (1 << ARMREG_R5) | (1 << ARMREG_R6) | (1 << ARMREG_R7));
+
+
+ /* copy args into workset */
+ /* callme - always present */
+ ARM_MOV_REG_REG(p, ARMREG_R4, ARMREG_A1);
+ /* retval */
+ if (sig->ret->byref || string_ctor || (sig->ret->type != MONO_TYPE_VOID)) {
+ ARM_MOV_REG_REG(p, ARMREG_R5, ARMREG_A2);
+ }
+ /* this_obj */
+ if (sig->hasthis) {
+ this_loaded = 0;
+ if (stack_size == 0) {
+ ARM_MOV_REG_REG(p, ARMREG_A1, ARMREG_A3);
+ this_loaded = 1;
+ } else {
+ ARM_MOV_REG_REG(p, ARMREG_R6, ARMREG_A3);
+ }
+ }
+ /* args */
+ if (sig->param_count != 0) {
+ ARM_MOV_REG_REG(p, ARMREG_R7, ARMREG_A4);
+ }
+
+ stack_offs = stack_size;
+
+ /* handle arguments */
+ /* processed in reverse order so that r0 (arg1) stays free for memory transfers */
+ for (i = sig->param_count; --i >= 0;) {
+ param = sig->params [i];
+ if (param->byref) {
+ if (i < aregs && reg_alloc[i] > 0) {
+ ARM_LDR_IMM(p, ARMREG_A1 + hasthis + (aregs - reg_alloc [i]), REG_ARGP, i*ARG_SIZE);
+ } else {
+ stack_offs -= sizeof(armword_t);
+ ARM_LDR_IMM(p, ARMREG_R0, REG_ARGP, i*ARG_SIZE);
+ ARM_STR_IMM(p, ARMREG_R0, ARMREG_SP, stack_offs);
+ }
+ } else {
+ simple_type = param->type;
+enum_marshal:
+ switch (simple_type) {
+ case MONO_TYPE_BOOLEAN:
+ case MONO_TYPE_CHAR:
+ case MONO_TYPE_I1:
+ case MONO_TYPE_U1:
+ case MONO_TYPE_I2:
+ case MONO_TYPE_U2:
+ case MONO_TYPE_I4:
+ case MONO_TYPE_U4:
+ case MONO_TYPE_I:
+ case MONO_TYPE_U:
+ case MONO_TYPE_PTR:
+ case MONO_TYPE_R4:
+ case MONO_TYPE_SZARRAY:
+ case MONO_TYPE_CLASS:
+ case MONO_TYPE_OBJECT:
+ case MONO_TYPE_STRING:
+ if (i < aregs && reg_alloc [i] > 0) {
+ /* pass in register */
+ ARM_LDR_IMM(p, ARMREG_A1 + hasthis + (aregs - reg_alloc [i]), REG_ARGP, i*ARG_SIZE);
+ } else {
+ stack_offs -= sizeof(armword_t);
+ ARM_LDR_IMM(p, ARMREG_R0, REG_ARGP, i*ARG_SIZE);
+ ARM_STR_IMM(p, ARMREG_R0, ARMREG_SP, stack_offs);
+ }
+ break;
+ case MONO_TYPE_I8:
+ case MONO_TYPE_U8:
+ case MONO_TYPE_R8:
+ if (i < aregs && reg_alloc [i] > 0) {
+ if (reg_alloc [i] > 1) {
+ /* pass in registers */
+ ARM_LDR_IMM(p, ARMREG_A1 + hasthis + (aregs - reg_alloc [i]), REG_ARGP, i*ARG_SIZE);
+ ARM_LDR_IMM(p, ARMREG_A1 + hasthis + (aregs - reg_alloc [i]) + 1, REG_ARGP, i*ARG_SIZE + 4);
+ } else {
+ stack_offs -= sizeof(armword_t);
+ ARM_LDR_IMM(p, ARMREG_R0, REG_ARGP, i*ARG_SIZE + 4);
+ ARM_STR_IMM(p, ARMREG_R0, ARMREG_SP, stack_offs);
+ ARM_LDR_IMM(p, ARMREG_A1 + hasthis + (aregs - reg_alloc [i]), REG_ARGP, i*ARG_SIZE);
+ }
+ } else {
+ /* two words transferred on the stack */
+ stack_offs -= 2*sizeof(armword_t);
+ ARM_LDR_IMM(p, ARMREG_R0, REG_ARGP, i*ARG_SIZE);
+ ARM_STR_IMM(p, ARMREG_R0, ARMREG_SP, stack_offs);
+ ARM_LDR_IMM(p, ARMREG_R0, REG_ARGP, i*ARG_SIZE + 4);
+ ARM_STR_IMM(p, ARMREG_R0, ARMREG_SP, stack_offs + 4);
+ }
+ break;
+ case MONO_TYPE_VALUETYPE:
+ if (param->data.klass->enumtype) {
+ /* it's an enum value, proceed based on its base type */
+ simple_type = param->data.klass->enum_basetype->type;
+ goto enum_marshal;
+ } else {
+ if (i < aregs && reg_alloc[i] > 0) {
+ int vtreg = ARMREG_A1 + hasthis + (aregs - reg_alloc[i]);
+ ARM_LDR_IMM(p, vtreg, REG_ARGP, i * ARG_SIZE);
+ ARM_LDR_IMM(p, vtreg, vtreg, 0);
+ } else {
+ stack_offs -= sizeof(armword_t);
+ ARM_LDR_IMM(p, ARMREG_R0, REG_ARGP, i * ARG_SIZE);
+ ARM_LDR_IMM(p, ARMREG_R0, ARMREG_R0, 0);
+ ARM_STR_IMM(p, ARMREG_R0, ARMREG_SP, stack_offs);
+ }
+ }
+ break;
+
+ default:
+ break;
+ }
+ }
+ }
+
+ if (sig->hasthis && !this_loaded) {
+ /* [this] always passed in A1, regardless of sig->call_convention */
+ ARM_MOV_REG_REG(p, ARMREG_A1, REG_THIS);
+ }
+
+ /* call [func] */
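+ /* mov lr,pc / mov pc,rX is the pre-ARMv5 indirect-call idiom: PC reads
+    as the current instruction + 8, so LR ends up addressing the
+    instruction right after the jump (no BLX on ARMv4) */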
+ ARM_MOV_REG_REG(p, ARMREG_LR, ARMREG_PC);
+ ARM_MOV_REG_REG(p, ARMREG_PC, REG_FUNC_ADDR);
+
+ /* handle retval */
+ if (sig->ret->byref || string_ctor) {
+ ARM_STR_IMM(p, ARMREG_R0, REG_RETVAL, 0);
+ } else {
+ simple_type = sig->ret->type;
+enum_retvalue:
+ switch (simple_type) {
+ case MONO_TYPE_BOOLEAN:
+ case MONO_TYPE_I1:
+ case MONO_TYPE_U1:
+ ARM_STRB_IMM(p, ARMREG_R0, REG_RETVAL, 0);
+ break;
+ case MONO_TYPE_CHAR:
+ case MONO_TYPE_I2:
+ case MONO_TYPE_U2:
+ ARM_STRH_IMM(p, ARMREG_R0, REG_RETVAL, 0);
+ break;
+ /*
+ * A 32-bit integer and integer-equivalent return value
+ * is returned in R0.
+ * Single-precision floating-point values are returned in R0.
+ */
+ case MONO_TYPE_I:
+ case MONO_TYPE_U:
+ case MONO_TYPE_I4:
+ case MONO_TYPE_U4:
+ case MONO_TYPE_R4:
+ case MONO_TYPE_OBJECT:
+ case MONO_TYPE_CLASS:
+ case MONO_TYPE_ARRAY:
+ case MONO_TYPE_SZARRAY:
+ case MONO_TYPE_STRING:
+ ARM_STR_IMM(p, ARMREG_R0, REG_RETVAL, 0);
+ break;
+ /*
+ * A 64-bit integer is returned in R0 and R1.
+ * Double-precision floating-point values are returned in R0 and R1.
+ */
+ case MONO_TYPE_I8:
+ case MONO_TYPE_U8:
+ case MONO_TYPE_R8:
+ ARM_STR_IMM(p, ARMREG_R0, REG_RETVAL, 0);
+ ARM_STR_IMM(p, ARMREG_R1, REG_RETVAL, 4);
+ break;
+ case MONO_TYPE_VALUETYPE:
+ if (sig->ret->data.klass->enumtype) {
+ simple_type = sig->ret->data.klass->enum_basetype->type;
+ goto enum_retvalue;
+ }
+ break;
+ case MONO_TYPE_VOID:
+ break;
+ default:
+ break;
+ }
+ }
+
+ p = arm_emit_std_epilogue(p, stack_size,
+ /* restore R4-R7 */
+ (1 << ARMREG_R4) | (1 << ARMREG_R5) | (1 << ARMREG_R6) | (1 << ARMREG_R7));
+
+ flush_icache();
+
+#ifdef ARM_DUMP_DISASM
+ _armdis_decode((arminstr_t*)code_buff, ((guint8*)p) - ((guint8*)code_buff));
+#endif
+
+ return code_buff;
+}
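+
+/*
+ * Usage sketch (illustrative; `method`, `native_func`, `retval`, `this_obj`
+ * and `args` are assumed to come from the interpreter):
+ *
+ *   MonoPIFunc t = mono_arch_create_trampoline (method->signature, FALSE);
+ *   t (native_func, &retval, this_obj, args);  // args is a stackval[]
+ */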
+
+
+
+#define MINV_OFFS(member) G_STRUCT_OFFSET(MonoInvocation, member)
+
+
+
+/*
+ * Returns a pointer to a native function that can be used to
+ * call the specified method.
+ * The function created will receive the arguments according
+ * to the call convention specified in the method.
+ * This function works by creating a MonoInvocation structure,
+ * filling the fields in and calling ves_exec_method on it.
+ * Still need to figure out how to handle the exception stuff
+ * across the managed/unmanaged boundary.
+ */
+void* mono_arch_create_method_pointer (MonoMethod* method)
+{
+ MonoMethodSignature* sig;
+ guchar* p, * p_method, * p_stackval_from_data, * p_exec;
+ void* code_buff;
+ int i, stack_size, arg_pos, arg_add, stackval_pos, offs;
+ int areg, reg_args, shift, pos;
+ MonoJitInfo *ji;
+
+ code_buff = alloc_code_buff(128);
+ p = (guchar*)code_buff;
+
+ sig = method->signature;
+
+ ARM_B(p, 3);
+
+ /* embed magic number followed by method pointer */
+ *p++ = 'M';
+ *p++ = 'o';
+ *p++ = 'n';
+ *p++ = 'o';
+ /* method ptr */
+ *(void**)p = method;
+ p_method = p;
+ p += 4;
+
+ /* call table */
+ *(void**)p = stackval_from_data;
+ p_stackval_from_data = p;
+ p += 4;
+ *(void**)p = ves_exec_method;
+ p_exec = p;
+ p += 4;
+
+ stack_size = sizeof(MonoInvocation) + ARG_SIZE*(sig->param_count + 1) + ARM_NUM_ARG_REGS*2*sizeof(armword_t);
+
+ /* prologue */
+ p = (guchar*)arm_emit_lean_prologue((arminstr_t*)p, stack_size,
+ (1 << ARMREG_R4) |
+ (1 << ARMREG_R5) |
+ (1 << ARMREG_R6) |
+ (1 << ARMREG_R7));
+
+ /* R7 - ptr to stack args */
+ ARM_MOV_REG_REG(p, ARMREG_R7, ARMREG_IP);
+
+ /*
+ * Initialize MonoInvocation fields, first the ones known now.
+ */
+ ARM_MOV_REG_IMM8(p, ARMREG_R4, 0);
+ ARM_STR_IMM(p, ARMREG_R4, ARMREG_SP, MINV_OFFS(ex));
+ ARM_STR_IMM(p, ARMREG_R4, ARMREG_SP, MINV_OFFS(ex_handler));
+ ARM_STR_IMM(p, ARMREG_R4, ARMREG_SP, MINV_OFFS(parent));
+
+ /* Set the method pointer. */
+ ARM_LDR_IMM(p, ARMREG_R4, ARMREG_PC, -(int)(p - p_method + sizeof(arminstr_t)*2));
+ ARM_STR_IMM(p, ARMREG_R4, ARMREG_SP, MINV_OFFS(method));
+
+ if (sig->hasthis) {
+ /* [this] in A1 */
+ ARM_STR_IMM(p, ARMREG_A1, ARMREG_SP, MINV_OFFS(obj));
+ } else {
+ /* else set minv.obj to NULL */
+ ARM_STR_IMM(p, ARMREG_R4, ARMREG_SP, MINV_OFFS(obj));
+ }
+
+ /* copy args from registers to stack */
+ areg = ARMREG_A1 + sig->hasthis;
+ arg_pos = -(int)(ARM_NUM_ARG_REGS - sig->hasthis) * 2 * sizeof(armword_t);
+ arg_add = 0;
+ for (i = 0; i < sig->param_count; ++i) {
+ if (areg >= ARM_NUM_ARG_REGS) break;
+ ARM_STR_IMM(p, areg, ARMREG_R7, arg_pos);
+ ++areg;
+ if (!sig->params[i]->byref) {
+ switch (sig->params[i]->type) {
+ case MONO_TYPE_I8:
+ case MONO_TYPE_U8:
+ case MONO_TYPE_R8:
+ if (areg >= ARM_NUM_ARG_REGS) {
+ /* load second half of 64-bit arg */
+ ARM_LDR_IMM(p, ARMREG_R4, ARMREG_R7, 0);
+ ARM_STR_IMM(p, ARMREG_R4, ARMREG_R7, arg_pos + sizeof(armword_t));
+ arg_add = sizeof(armword_t);
+ } else {
+					/* second half is already in the next register */
+ ARM_STR_IMM(p, areg, ARMREG_R7, arg_pos + sizeof(armword_t));
+ ++areg;
+ }
+ break;
+ case MONO_TYPE_VALUETYPE:
+ /* assert */
+ default:
+ break;
+ }
+ }
+ arg_pos += 2 * sizeof(armword_t);
+ }
+ /* number of args passed in registers */
+ reg_args = i;
+
+
+
+ /*
+ * Calc and save stack args ptr,
+ * args follow MonoInvocation struct on the stack.
+ */
+ ARM_ADD_REG_IMM8(p, ARMREG_R1, ARMREG_SP, sizeof(MonoInvocation));
+ ARM_STR_IMM(p, ARMREG_R1, ARMREG_SP, MINV_OFFS(stack_args));
+
+ /* convert method args to stackvals */
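+	/* each iteration sets A1 = MonoType ptr, A2 = destination stackval, A3 = ptr to the raw argument, then calls stackval_from_data */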
+ arg_pos = -(int)(ARM_NUM_ARG_REGS - sig->hasthis) * 2 * sizeof(armword_t);
+ stackval_pos = sizeof(MonoInvocation);
+ for (i = 0; i < sig->param_count; ++i) {
+ if (i < reg_args) {
+ ARM_SUB_REG_IMM8(p, ARMREG_A3, ARMREG_R7, -arg_pos);
+ arg_pos += 2 * sizeof(armword_t);
+ } else {
+ if (arg_pos < 0) arg_pos = 0;
+ pos = arg_pos + arg_add;
+ if (pos <= 0xFF) {
+ ARM_ADD_REG_IMM8(p, ARMREG_A3, ARMREG_R7, pos);
+ } else {
+ if (is_arm_const((armword_t)pos)) {
+ shift = calc_arm_mov_const_shift((armword_t)pos);
+ ARM_ADD_REG_IMM(p, ARMREG_A3, ARMREG_R7, pos >> ((32 - shift) & 31), shift >> 1);
+ } else {
+ p = (guchar*)arm_mov_reg_imm32((arminstr_t*)p, ARMREG_R6, (armword_t)pos);
+					ARM_ADD_REG_REG(p, ARMREG_A3, ARMREG_R7, ARMREG_R6);
+ }
+ }
+ arg_pos += sizeof(armword_t);
+ if (!sig->params[i]->byref) {
+ switch (sig->params[i]->type) {
+ case MONO_TYPE_I8:
+ case MONO_TYPE_U8:
+ case MONO_TYPE_R8:
+ arg_pos += sizeof(armword_t);
+ break;
+ case MONO_TYPE_VALUETYPE:
+ /* assert */
+ default:
+ break;
+ }
+ }
+ }
+
+ /* A2 = result */
+ if (stackval_pos <= 0xFF) {
+ ARM_ADD_REG_IMM8(p, ARMREG_A2, ARMREG_SP, stackval_pos);
+ } else {
+ if (is_arm_const((armword_t)stackval_pos)) {
+ shift = calc_arm_mov_const_shift((armword_t)stackval_pos);
+ ARM_ADD_REG_IMM(p, ARMREG_A2, ARMREG_SP, stackval_pos >> ((32 - shift) & 31), shift >> 1);
+ } else {
+ p = (guchar*)arm_mov_reg_imm32((arminstr_t*)p, ARMREG_R6, (armword_t)stackval_pos);
+ ARM_ADD_REG_REG(p, ARMREG_A2, ARMREG_SP, ARMREG_R6);
+ }
+ }
+
+ /* A1 = type */
+ p = (guchar*)arm_mov_reg_imm32((arminstr_t*)p, ARMREG_A1, (armword_t)sig->params [i]);
+
+ stackval_pos += ARG_SIZE;
+
+ offs = -(p + 2*sizeof(arminstr_t) - p_stackval_from_data);
+ /* load function address */
+ ARM_LDR_IMM(p, ARMREG_R4, ARMREG_PC, offs);
+ /* call stackval_from_data */
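+		/* MOV LR,PC reads PC as the MOV's address + 8, i.e. the instruction after the jump, so LR receives the return address */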
+ ARM_MOV_REG_REG(p, ARMREG_LR, ARMREG_PC);
+ ARM_MOV_REG_REG(p, ARMREG_PC, ARMREG_R4);
+ }
+
+ /* store retval ptr */
+ p = (guchar*)arm_mov_reg_imm32((arminstr_t*)p, ARMREG_R5, (armword_t)stackval_pos);
+	ARM_ADD_REG_REG(p, ARMREG_R5, ARMREG_SP, ARMREG_R5);
+ ARM_STR_IMM(p, ARMREG_R5, ARMREG_SP, MINV_OFFS(retval));
+
+ /*
+ * Call the method.
+ */
+ /* A1 = MonoInvocation ptr */
+ ARM_MOV_REG_REG(p, ARMREG_A1, ARMREG_SP);
+ offs = -(p + 2*sizeof(arminstr_t) - p_exec);
+ /* load function address */
+ ARM_LDR_IMM(p, ARMREG_R4, ARMREG_PC, offs);
+ /* call ves_exec */
+ ARM_MOV_REG_REG(p, ARMREG_LR, ARMREG_PC);
+ ARM_MOV_REG_REG(p, ARMREG_PC, ARMREG_R4);
+
+
+ /*
+ * Move retval into reg.
+ */
+ if (sig->ret->byref) {
+ ARM_LDR_IMM(p, ARMREG_R0, ARMREG_R5, 0);
+ } else {
+ switch (sig->ret->type) {
+ case MONO_TYPE_BOOLEAN:
+ case MONO_TYPE_I1:
+ case MONO_TYPE_U1:
+ ARM_LDRB_IMM(p, ARMREG_R0, ARMREG_R5, 0);
+ break;
+ case MONO_TYPE_CHAR:
+ case MONO_TYPE_I2:
+ case MONO_TYPE_U2:
+ ARM_LDRH_IMM(p, ARMREG_R0, ARMREG_R5, 0);
+ break;
+ case MONO_TYPE_I:
+ case MONO_TYPE_U:
+ case MONO_TYPE_I4:
+ case MONO_TYPE_U4:
+ case MONO_TYPE_R4:
+ case MONO_TYPE_OBJECT:
+ case MONO_TYPE_CLASS:
+ case MONO_TYPE_ARRAY:
+ case MONO_TYPE_SZARRAY:
+ ARM_LDR_IMM(p, ARMREG_R0, ARMREG_R5, 0);
+ break;
+ case MONO_TYPE_I8:
+ case MONO_TYPE_U8:
+ case MONO_TYPE_R8:
+ ARM_LDR_IMM(p, ARMREG_R0, ARMREG_R5, 0);
+ ARM_LDR_IMM(p, ARMREG_R1, ARMREG_R5, 4);
+ break;
+ case MONO_TYPE_VOID:
+ default:
+ break;
+ }
+ }
+
+
+ p = (guchar*)arm_emit_std_epilogue((arminstr_t*)p, stack_size,
+ (1 << ARMREG_R4) |
+ (1 << ARMREG_R5) |
+ (1 << ARMREG_R6) |
+ (1 << ARMREG_R7));
+
+ flush_icache();
+
+#ifdef ARM_DUMP_DISASM
+ _armdis_decode((arminstr_t*)code_buff, ((guint8*)p) - ((guint8*)code_buff));
+#endif
+
+ ji = g_new0(MonoJitInfo, 1);
+ ji->method = method;
+ ji->code_size = ((guint8 *) p) - ((guint8 *) code_buff);
+ ji->code_start = (gpointer) code_buff;
+
+ mono_jit_info_table_add(mono_get_root_domain (), ji);
+
+ return code_buff;
+}
+
+
+/*
+ * mono_arch_create_method_pointer () embeds a pointer to the MonoMethod
+ * in the code stream so that the interp can easily get at the data: this
+ * function retrieves that method from the code stream.
+ */
+MonoMethod* mono_method_pointer_get (void* code)
+{
+ unsigned char* c = code;
+	/* check for the magic number that follows the unconditional branch */
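+	/* layout: word 0 = branch, word 1 = "Mono" magic, word 2 = MonoMethod ptr */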
+ if (c[4] == 'M' &&
+ c[5] == 'o' &&
+ c[6] == 'n' &&
+ c[7] == 'o') return ((MonoMethod**)code)[2];
+ return NULL;
+}
+#endif
diff --git a/lib/ffts/src/arch/arm/vfp_macros.th b/lib/ffts/src/arch/arm/vfp_macros.th
new file mode 100644
index 0000000..cca67dc
--- /dev/null
+++ b/lib/ffts/src/arch/arm/vfp_macros.th
@@ -0,0 +1,15 @@
+/* -- <Op> -- */
+
+
+/* Fd := Fn <Op> Fm */
+#define ARM_VFP_<Op>D_COND(p, rd, rn, rm, cond) \
+ ARM_EMIT((p), ARM_DEF_VFP_DYADIC(cond,ARM_VFP_COPROC_DOUBLE,ARM_VFP_<Op>,rd,rn,rm))
+#define ARM_VFP_<Op>D(p, rd, rn, rm) \
+ ARM_VFP_<Op>D_COND(p, rd, rn, rm, ARMCOND_AL)
+
+#define ARM_VFP_<Op>S_COND(p, rd, rn, rm, cond) \
+ ARM_EMIT((p), ARM_DEF_VFP_DYADIC(cond,ARM_VFP_COPROC_SINGLE,ARM_VFP_<Op>,rd,rn,rm))
+#define ARM_VFP_<Op>S(p, rd, rn, rm) \
+ ARM_VFP_<Op>S_COND(p, rd, rn, rm, ARMCOND_AL)
+
+
diff --git a/lib/ffts/src/arch/arm/vfpm_macros.th b/lib/ffts/src/arch/arm/vfpm_macros.th
new file mode 100644
index 0000000..25ad721
--- /dev/null
+++ b/lib/ffts/src/arch/arm/vfpm_macros.th
@@ -0,0 +1,14 @@
+/* -- <Op> -- */
+
+
+/* Fd := <Op> Fm */
+
+#define ARM_<Op>D_COND(p,dreg,sreg,cond) \
+ ARM_EMIT((p), ARM_DEF_VFP_MONADIC((cond),ARM_VFP_COPROC_DOUBLE,ARM_VFP_<Op>,(dreg),(sreg)))
+#define ARM_<Op>D(p,dreg,sreg) ARM_<Op>D_COND(p,dreg,sreg,ARMCOND_AL)
+
+#define ARM_<Op>S_COND(p,dreg,sreg,cond) \
+ ARM_EMIT((p), ARM_DEF_VFP_MONADIC((cond),ARM_VFP_COPROC_SINGLE,ARM_VFP_<Op>,(dreg),(sreg)))
+#define ARM_<Op>S(p,dreg,sreg) ARM_<Op>S_COND(p,dreg,sreg,ARMCOND_AL)
+
+
diff --git a/lib/ffts/src/arch/arm/vfpops.sh b/lib/ffts/src/arch/arm/vfpops.sh
new file mode 100755
index 0000000..bed4a9c
--- /dev/null
+++ b/lib/ffts/src/arch/arm/vfpops.sh
@@ -0,0 +1,24 @@
+#!/bin/sh
+
+DYADIC="ADD SUB MUL NMUL DIV"
+MONADIC="CPY ABS NEG SQRT CMP CMPE CMPZ CMPEZ CVT UITO SITO TOUI TOSI TOUIZ TOSIZ"
+
+# $1: opcode list
+# $2: template
+gen() {
+ for i in $1; do
+ sed "s/<Op>/$i/g" $2.th
+ done
+}
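+
+# For example, `gen "ADD" vfp_macros` emits the body of vfp_macros.th with
+# every <Op> replaced by ADD, defining the ARM_VFP_ADDD/ARM_VFP_ADDS macros
+# and their _COND variants.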
+
+# use printf: `echo -e` is not portable under plain /bin/sh
+printf '/* Macros for VFP ops, auto-generated from template */\n\n'
+
+printf '\n/* dyadic */\n\n'
+gen "$DYADIC" vfp_macros
+
+printf '\n/* monadic */\n\n'
+gen "$MONADIC" vfpm_macros
+
+printf '\n\n\n'
+
+printf '\n/* end generated */\n\n'
diff --git a/lib/ffts/src/arch/arm64/.gitignore b/lib/ffts/src/arch/arm64/.gitignore
new file mode 100644
index 0000000..13efac7
--- /dev/null
+++ b/lib/ffts/src/arch/arm64/.gitignore
@@ -0,0 +1,6 @@
+/
+/Makefile
+/Makefile.in
+/*.o
+/*.lo
+/.deps
diff --git a/lib/ffts/src/arch/arm64/Makefile.am b/lib/ffts/src/arch/arm64/Makefile.am
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/lib/ffts/src/arch/arm64/Makefile.am
diff --git a/lib/ffts/src/arch/arm64/arm64-codegen.h b/lib/ffts/src/arch/arm64/arm64-codegen.h
new file mode 100644
index 0000000..259ff96
--- /dev/null
+++ b/lib/ffts/src/arch/arm64/arm64-codegen.h
@@ -0,0 +1,3 @@
+#include "../../../../mono-extensions/mono/arch/arm64/arm64-codegen.h"
+
+
diff --git a/lib/ffts/src/arch/ia64/.gitignore b/lib/ffts/src/arch/ia64/.gitignore
new file mode 100644
index 0000000..b336cc7
--- /dev/null
+++ b/lib/ffts/src/arch/ia64/.gitignore
@@ -0,0 +1,2 @@
+/Makefile
+/Makefile.in
diff --git a/lib/ffts/src/arch/ia64/Makefile.am b/lib/ffts/src/arch/ia64/Makefile.am
new file mode 100644
index 0000000..e03ea47
--- /dev/null
+++ b/lib/ffts/src/arch/ia64/Makefile.am
@@ -0,0 +1,3 @@
+EXTRA_DIST = ia64-codegen.h
+
+
diff --git a/lib/ffts/src/arch/ia64/codegen.c b/lib/ffts/src/arch/ia64/codegen.c
new file mode 100644
index 0000000..97e1aef
--- /dev/null
+++ b/lib/ffts/src/arch/ia64/codegen.c
@@ -0,0 +1,861 @@
+/*
+ * codegen.c: Tests for the IA64 code generation macros
+ */
+
+#include <glib.h>
+#include <stdio.h>
+#include <ctype.h>
+
+#define IA64_SIMPLE_EMIT_BUNDLE
+
+#include <mono/arch/ia64/ia64-codegen.h>
+
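+/*
+ * Write the code bytes out as .byte directives, assemble them with the
+ * system assembler and disassemble the object with objdump, so the
+ * encoding macros can be checked against a real disassembler.
+ */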
+void
+mono_disassemble_code (guint8 *code, int size, char *id)
+{
+ int i;
+ FILE *ofd;
+ const char *tmp = g_get_tmp_dir ();
+ const char *objdump_args = g_getenv ("MONO_OBJDUMP_ARGS");
+ char *as_file;
+ char *o_file;
+ char *cmd;
+
+ as_file = g_strdup_printf ("%s/test.s", tmp);
+
+ if (!(ofd = fopen (as_file, "w")))
+ g_assert_not_reached ();
+
+ for (i = 0; id [i]; ++i) {
+ if (!isalnum (id [i]))
+ fprintf (ofd, "_");
+ else
+ fprintf (ofd, "%c", id [i]);
+ }
+ fprintf (ofd, ":\n");
+
+ for (i = 0; i < size; ++i)
+ fprintf (ofd, ".byte %d\n", (unsigned int) code [i]);
+
+ fclose (ofd);
+
+#ifdef __ia64__
+#define DIS_CMD "objdump -d"
+#define AS_CMD "as"
+#else
+#define DIS_CMD "ia64-linux-gnu-objdump -d"
+#define AS_CMD "ia64-linux-gnu-as"
+#endif
+
+ o_file = g_strdup_printf ("%s/test.o", tmp);
+ cmd = g_strdup_printf (AS_CMD " %s -o %s", as_file, o_file);
+ system (cmd);
+ g_free (cmd);
+ if (!objdump_args)
+ objdump_args = "";
+
+ cmd = g_strdup_printf (DIS_CMD " %s %s", objdump_args, o_file);
+ system (cmd);
+ g_free (cmd);
+
+ g_free (o_file);
+ g_free (as_file);
+}
+
+int
+main ()
+{
+ Ia64CodegenState code;
+
+ guint8 *buf = g_malloc0 (40960);
+
+ ia64_codegen_init (code, buf);
+
+ ia64_add (code, 1, 2, 3);
+ ia64_add1 (code, 1, 2, 3);
+ ia64_sub (code, 1, 2, 3);
+ ia64_sub1 (code, 1, 2, 3);
+ ia64_addp4 (code, 1, 2, 3);
+ ia64_and (code, 1, 2, 3);
+ ia64_andcm (code, 1, 2, 3);
+ ia64_or (code, 1, 2, 3);
+ ia64_xor (code, 1, 2, 3);
+ ia64_shladd (code, 1, 2, 3, 4);
+ ia64_shladdp4 (code, 1, 2, 3, 4);
+ ia64_sub_imm (code, 1, 0x7f, 2);
+ ia64_sub_imm (code, 1, -1, 2);
+ ia64_and_imm (code, 1, -128, 2);
+ ia64_andcm_imm (code, 1, -128, 2);
+ ia64_or_imm (code, 1, -128, 2);
+ ia64_xor_imm (code, 1, -128, 2);
+ ia64_adds_imm (code, 1, 8191, 2);
+ ia64_adds_imm (code, 1, -8192, 2);
+ ia64_adds_imm (code, 1, 1234, 2);
+ ia64_adds_imm (code, 1, -1234, 2);
+ ia64_addp4_imm (code, 1, -1234, 2);
+ ia64_addl_imm (code, 1, 1234, 2);
+ ia64_addl_imm (code, 1, -1234, 2);
+ ia64_addl_imm (code, 1, 2097151, 2);
+ ia64_addl_imm (code, 1, -2097152, 2);
+
+ ia64_cmp_lt (code, 1, 2, 1, 2);
+ ia64_cmp_ltu (code, 1, 2, 1, 2);
+ ia64_cmp_eq (code, 1, 2, 1, 2);
+ ia64_cmp_lt_unc (code, 1, 2, 1, 2);
+ ia64_cmp_ltu_unc (code, 1, 2, 1, 2);
+ ia64_cmp_eq_unc (code, 1, 2, 1, 2);
+ ia64_cmp_eq_and (code, 1, 2, 1, 2);
+ ia64_cmp_eq_or (code, 1, 2, 1, 2);
+ ia64_cmp_eq_or_andcm (code, 1, 2, 1, 2);
+ ia64_cmp_ne_and (code, 1, 2, 1, 2);
+ ia64_cmp_ne_or (code, 1, 2, 1, 2);
+ ia64_cmp_ne_or_andcm (code, 1, 2, 1, 2);
+
+ ia64_cmp4_lt (code, 1, 2, 1, 2);
+ ia64_cmp4_ltu (code, 1, 2, 1, 2);
+ ia64_cmp4_eq (code, 1, 2, 1, 2);
+ ia64_cmp4_lt_unc (code, 1, 2, 1, 2);
+ ia64_cmp4_ltu_unc (code, 1, 2, 1, 2);
+ ia64_cmp4_eq_unc (code, 1, 2, 1, 2);
+ ia64_cmp4_eq_and (code, 1, 2, 1, 2);
+ ia64_cmp4_eq_or (code, 1, 2, 1, 2);
+ ia64_cmp4_eq_or_andcm (code, 1, 2, 1, 2);
+ ia64_cmp4_ne_and (code, 1, 2, 1, 2);
+ ia64_cmp4_ne_or (code, 1, 2, 1, 2);
+ ia64_cmp4_ne_or_andcm (code, 1, 2, 1, 2);
+
+ ia64_cmp_gt_and (code, 1, 2, 0, 2);
+ ia64_cmp_gt_or (code, 1, 2, 0, 2);
+ ia64_cmp_gt_or_andcm (code, 1, 2, 0, 2);
+ ia64_cmp_le_and (code, 1, 2, 0, 2);
+ ia64_cmp_le_or (code, 1, 2, 0, 2);
+ ia64_cmp_le_or_andcm (code, 1, 2, 0, 2);
+ ia64_cmp_ge_and (code, 1, 2, 0, 2);
+ ia64_cmp_ge_or (code, 1, 2, 0, 2);
+ ia64_cmp_ge_or_andcm (code, 1, 2, 0, 2);
+ ia64_cmp_lt_and (code, 1, 2, 0, 2);
+ ia64_cmp_lt_or (code, 1, 2, 0, 2);
+ ia64_cmp_lt_or_andcm (code, 1, 2, 0, 2);
+
+ ia64_cmp4_gt_and (code, 1, 2, 0, 2);
+ ia64_cmp4_gt_or (code, 1, 2, 0, 2);
+ ia64_cmp4_gt_or_andcm (code, 1, 2, 0, 2);
+ ia64_cmp4_le_and (code, 1, 2, 0, 2);
+ ia64_cmp4_le_or (code, 1, 2, 0, 2);
+ ia64_cmp4_le_or_andcm (code, 1, 2, 0, 2);
+ ia64_cmp4_ge_and (code, 1, 2, 0, 2);
+ ia64_cmp4_ge_or (code, 1, 2, 0, 2);
+ ia64_cmp4_ge_or_andcm (code, 1, 2, 0, 2);
+ ia64_cmp4_lt_and (code, 1, 2, 0, 2);
+ ia64_cmp4_lt_or (code, 1, 2, 0, 2);
+ ia64_cmp4_lt_or_andcm (code, 1, 2, 0, 2);
+
+ ia64_cmp_lt_imm (code, 1, 2, 127, 2);
+ ia64_cmp_lt_imm (code, 1, 2, -128, 2);
+
+ ia64_cmp_lt_imm (code, 1, 2, -128, 2);
+ ia64_cmp_ltu_imm (code, 1, 2, -128, 2);
+ ia64_cmp_eq_imm (code, 1, 2, -128, 2);
+ ia64_cmp_lt_unc_imm (code, 1, 2, -128, 2);
+ ia64_cmp_ltu_unc_imm (code, 1, 2, -128, 2);
+ ia64_cmp_eq_unc_imm (code, 1, 2, -128, 2);
+ ia64_cmp_eq_and_imm (code, 1, 2, -128, 2);
+ ia64_cmp_eq_or_imm (code, 1, 2, -128, 2);
+ ia64_cmp_eq_unc_imm (code, 1, 2, -128, 2);
+ ia64_cmp_ne_and_imm (code, 1, 2, -128, 2);
+ ia64_cmp_ne_or_imm (code, 1, 2, -128, 2);
+ ia64_cmp_ne_or_andcm_imm (code, 1, 2, -128, 2);
+
+ ia64_cmp4_lt_imm (code, 1, 2, -128, 2);
+ ia64_cmp4_ltu_imm (code, 1, 2, -128, 2);
+ ia64_cmp4_eq_imm (code, 1, 2, -128, 2);
+ ia64_cmp4_lt_unc_imm (code, 1, 2, -128, 2);
+ ia64_cmp4_ltu_unc_imm (code, 1, 2, -128, 2);
+ ia64_cmp4_eq_unc_imm (code, 1, 2, -128, 2);
+ ia64_cmp4_eq_and_imm (code, 1, 2, -128, 2);
+ ia64_cmp4_eq_or_imm (code, 1, 2, -128, 2);
+ ia64_cmp4_eq_unc_imm (code, 1, 2, -128, 2);
+ ia64_cmp4_ne_and_imm (code, 1, 2, -128, 2);
+ ia64_cmp4_ne_or_imm (code, 1, 2, -128, 2);
+ ia64_cmp4_ne_or_andcm_imm (code, 1, 2, -128, 2);
+
+ ia64_padd1 (code, 1, 2, 3);
+ ia64_padd2 (code, 1, 2, 3);
+ ia64_padd4 (code, 1, 2, 3);
+ ia64_padd1_sss (code, 1, 2, 3);
+ ia64_padd2_sss (code, 1, 2, 3);
+ ia64_padd1_uuu (code, 1, 2, 3);
+ ia64_padd2_uuu (code, 1, 2, 3);
+ ia64_padd1_uus (code, 1, 2, 3);
+ ia64_padd2_uus (code, 1, 2, 3);
+
+ ia64_psub1 (code, 1, 2, 3);
+ ia64_psub2 (code, 1, 2, 3);
+ ia64_psub4 (code, 1, 2, 3);
+ ia64_psub1_sss (code, 1, 2, 3);
+ ia64_psub2_sss (code, 1, 2, 3);
+ ia64_psub1_uuu (code, 1, 2, 3);
+ ia64_psub2_uuu (code, 1, 2, 3);
+ ia64_psub1_uus (code, 1, 2, 3);
+ ia64_psub2_uus (code, 1, 2, 3);
+
+ ia64_pavg1 (code, 1, 2, 3);
+ ia64_pavg2 (code, 1, 2, 3);
+ ia64_pavg1_raz (code, 1, 2, 3);
+ ia64_pavg2_raz (code, 1, 2, 3);
+ ia64_pavgsub1 (code, 1, 2, 3);
+ ia64_pavgsub2 (code, 1, 2, 3);
+ ia64_pcmp1_eq (code, 1, 2, 3);
+ ia64_pcmp2_eq (code, 1, 2, 3);
+ ia64_pcmp4_eq (code, 1, 2, 3);
+ ia64_pcmp1_gt (code, 1, 2, 3);
+ ia64_pcmp2_gt (code, 1, 2, 3);
+ ia64_pcmp4_gt (code, 1, 2, 3);
+
+ ia64_pshladd2 (code, 1, 2, 3, 4);
+ ia64_pshradd2 (code, 1, 2, 3, 4);
+
+ ia64_pmpyshr2 (code, 1, 2, 3, 0);
+ ia64_pmpyshr2_u (code, 1, 2, 3, 0);
+ ia64_pmpyshr2 (code, 1, 2, 3, 7);
+ ia64_pmpyshr2_u (code, 1, 2, 3, 7);
+ ia64_pmpyshr2 (code, 1, 2, 3, 15);
+ ia64_pmpyshr2_u (code, 1, 2, 3, 15);
+ ia64_pmpyshr2 (code, 1, 2, 3, 16);
+ ia64_pmpyshr2_u (code, 1, 2, 3, 16);
+
+ ia64_pmpy2_r (code, 1, 2, 3);
+ ia64_pmpy2_l (code, 1, 2, 3);
+ ia64_mix1_r (code, 1, 2, 3);
+ ia64_mix2_r (code, 1, 2, 3);
+ ia64_mix4_r (code, 1, 2, 3);
+ ia64_mix1_l (code, 1, 2, 3);
+ ia64_mix2_l (code, 1, 2, 3);
+ ia64_mix4_l (code, 1, 2, 3);
+ ia64_pack2_uss (code, 1, 2, 3);
+ ia64_pack2_sss (code, 1, 2, 3);
+ ia64_pack4_sss (code, 1, 2, 3);
+ ia64_unpack1_h (code, 1, 2, 3);
+ ia64_unpack2_h (code, 1, 2, 3);
+ ia64_unpack4_h (code, 1, 2, 3);
+ ia64_unpack1_l (code, 1, 2, 3);
+ ia64_unpack2_l (code, 1, 2, 3);
+ ia64_unpack4_l (code, 1, 2, 3);
+ ia64_pmin1_u (code, 1, 2, 3);
+ ia64_pmax1_u (code, 1, 2, 3);
+ ia64_pmin2 (code, 1, 2, 3);
+ ia64_pmax2 (code, 1, 2, 3);
+ ia64_psad1 (code, 1, 2, 3);
+
+ ia64_mux1 (code, 1, 2, IA64_MUX1_BRCST);
+ ia64_mux1 (code, 1, 2, IA64_MUX1_MIX);
+ ia64_mux1 (code, 1, 2, IA64_MUX1_SHUF);
+ ia64_mux1 (code, 1, 2, IA64_MUX1_ALT);
+ ia64_mux1 (code, 1, 2, IA64_MUX1_REV);
+
+ ia64_mux2 (code, 1, 2, 0x8d);
+
+ ia64_pshr2 (code, 1, 2, 3);
+ ia64_pshr4 (code, 1, 2, 3);
+ ia64_shr (code, 1, 2, 3);
+ ia64_pshr2_u (code, 1, 2, 3);
+ ia64_pshr4_u (code, 1, 2, 3);
+ ia64_shr_u (code, 1, 2, 3);
+
+ ia64_pshr2_imm (code, 1, 2, 20);
+ ia64_pshr4_imm (code, 1, 2, 20);
+ ia64_pshr2_u_imm (code, 1, 2, 20);
+ ia64_pshr4_u_imm (code, 1, 2, 20);
+
+ ia64_pshl2 (code, 1, 2, 3);
+ ia64_pshl4 (code, 1, 2, 3);
+ ia64_shl (code, 1, 2, 3);
+
+ ia64_pshl2_imm (code, 1, 2, 20);
+ ia64_pshl4_imm (code, 1, 2, 20);
+
+ ia64_popcnt (code, 1, 2);
+
+ ia64_shrp (code, 1, 2, 3, 62);
+
+ ia64_extr_u (code, 1, 2, 62, 61);
+ ia64_extr (code, 1, 2, 62, 61);
+
+ ia64_dep_z (code, 1, 2, 62, 61);
+
+ ia64_dep_z_imm (code, 1, 127, 62, 61);
+ ia64_dep_z_imm (code, 1, -128, 62, 61);
+ ia64_dep_imm (code, 1, 0, 2, 62, 61);
+ ia64_dep_imm (code, 1, -1, 2, 62, 61);
+ ia64_dep (code, 1, 2, 3, 10, 15);
+
+ ia64_tbit_z (code, 1, 2, 3, 0);
+
+ ia64_tbit_z (code, 1, 2, 3, 63);
+ ia64_tbit_z_unc (code, 1, 2, 3, 63);
+ ia64_tbit_z_and (code, 1, 2, 3, 63);
+ ia64_tbit_nz_and (code, 1, 2, 3, 63);
+ ia64_tbit_z_or (code, 1, 2, 3, 63);
+ ia64_tbit_nz_or (code, 1, 2, 3, 63);
+ ia64_tbit_z_or_andcm (code, 1, 2, 3, 63);
+ ia64_tbit_nz_or_andcm (code, 1, 2, 3, 63);
+
+ ia64_tnat_z (code, 1, 2, 3);
+ ia64_tnat_z_unc (code, 1, 2, 3);
+ ia64_tnat_z_and (code, 1, 2, 3);
+ ia64_tnat_nz_and (code, 1, 2, 3);
+ ia64_tnat_z_or (code, 1, 2, 3);
+ ia64_tnat_nz_or (code, 1, 2, 3);
+ ia64_tnat_z_or_andcm (code, 1, 2, 3);
+ ia64_tnat_nz_or_andcm (code, 1, 2, 3);
+
+ ia64_nop_i (code, 0x1234);
+ ia64_hint_i (code, 0x1234);
+
+ ia64_break_i (code, 0x1234);
+
+ ia64_chk_s_i (code, 1, 0);
+ ia64_chk_s_i (code, 1, -1);
+ ia64_chk_s_i (code, 1, 1);
+
+ ia64_mov_to_br_hint (code, 1, 1, -1, IA64_MOV_TO_BR_WH_NONE, 0);
+ ia64_mov_to_br_hint (code, 1, 1, -1, IA64_MOV_TO_BR_WH_SPTK, 0);
+ ia64_mov_to_br_hint (code, 1, 1, -1, IA64_MOV_TO_BR_WH_DPTK, 0);
+ ia64_mov_to_br_hint (code, 1, 1, -1, IA64_MOV_TO_BR_WH_DPTK, IA64_BR_IH_IMP);
+ ia64_mov_ret_to_br_hint (code, 1, 1, -1, IA64_MOV_TO_BR_WH_NONE, 0);
+
+ ia64_mov_from_br (code, 1, 1);
+
+ ia64_mov_to_pred (code, 1, 0xfe);
+
+ ia64_mov_to_pred_rot_imm (code, 0xff0000);
+
+ ia64_mov_from_ip (code, 1);
+ ia64_mov_from_pred (code, 1);
+
+ ia64_mov_to_ar_i (code, 1, 1);
+
+ ia64_mov_to_ar_imm_i (code, 1, 127);
+
+ ia64_mov_from_ar_i (code, 1, 1);
+
+ ia64_zxt1 (code, 1, 2);
+ ia64_zxt2 (code, 1, 2);
+ ia64_zxt4 (code, 1, 2);
+ ia64_sxt1 (code, 1, 2);
+ ia64_sxt2 (code, 1, 2);
+ ia64_sxt4 (code, 1, 2);
+
+ ia64_czx1_l (code, 1, 2);
+ ia64_czx2_l (code, 1, 2);
+ ia64_czx1_r (code, 1, 2);
+ ia64_czx2_r (code, 1, 2);
+
+ ia64_ld1_hint (code, 1, 2, IA64_LD_HINT_NONE);
+ ia64_ld1_hint (code, 1, 2, IA64_LD_HINT_NT1);
+ ia64_ld1_hint (code, 1, 2, IA64_LD_HINT_NTA);
+
+ ia64_ld1_hint (code, 1, 2, 0);
+ ia64_ld2_hint (code, 1, 2, 0);
+ ia64_ld4_hint (code, 1, 2, 0);
+ ia64_ld8_hint (code, 1, 2, 0);
+
+ ia64_ld1_s_hint (code, 1, 2, 0);
+ ia64_ld2_s_hint (code, 1, 2, 0);
+ ia64_ld4_s_hint (code, 1, 2, 0);
+ ia64_ld8_s_hint (code, 1, 2, 0);
+
+ ia64_ld1_a_hint (code, 1, 2, 0);
+ ia64_ld2_a_hint (code, 1, 2, 0);
+ ia64_ld4_a_hint (code, 1, 2, 0);
+ ia64_ld8_a_hint (code, 1, 2, 0);
+
+ ia64_ld1_sa_hint (code, 1, 2, 0);
+ ia64_ld2_sa_hint (code, 1, 2, 0);
+ ia64_ld4_sa_hint (code, 1, 2, 0);
+ ia64_ld8_sa_hint (code, 1, 2, 0);
+
+ ia64_ld1_bias_hint (code, 1, 2, 0);
+ ia64_ld2_bias_hint (code, 1, 2, 0);
+ ia64_ld4_bias_hint (code, 1, 2, 0);
+ ia64_ld8_bias_hint (code, 1, 2, 0);
+
+ ia64_ld1_inc_hint (code, 1, 2, 3, IA64_LD_HINT_NONE);
+
+ ia64_ld1_inc_imm_hint (code, 1, 2, 255, IA64_LD_HINT_NONE);
+ ia64_ld1_inc_imm_hint (code, 1, 2, -256, IA64_LD_HINT_NONE);
+
+ ia64_st1_hint (code, 1, 2, IA64_ST_HINT_NTA);
+
+ ia64_st1_hint (code, 1, 2, IA64_ST_HINT_NONE);
+ ia64_st2_hint (code, 1, 2, IA64_ST_HINT_NONE);
+ ia64_st4_hint (code, 1, 2, IA64_ST_HINT_NONE);
+ ia64_st8_hint (code, 1, 2, IA64_ST_HINT_NONE);
+
+ ia64_st1_rel_hint (code, 1, 2, IA64_ST_HINT_NONE);
+ ia64_st2_rel_hint (code, 1, 2, IA64_ST_HINT_NONE);
+ ia64_st4_rel_hint (code, 1, 2, IA64_ST_HINT_NONE);
+ ia64_st8_rel_hint (code, 1, 2, IA64_ST_HINT_NONE);
+
+ ia64_st8_spill_hint (code, 1, 2, IA64_ST_HINT_NONE);
+
+ ia64_st16_hint (code, 1, 2, IA64_ST_HINT_NONE);
+ ia64_st16_rel_hint (code, 1, 2, IA64_ST_HINT_NONE);
+
+ ia64_st1_inc_imm_hint (code, 1, 2, 255, IA64_ST_HINT_NONE);
+ ia64_st2_inc_imm_hint (code, 1, 2, 255, IA64_ST_HINT_NONE);
+ ia64_st4_inc_imm_hint (code, 1, 2, 255, IA64_ST_HINT_NONE);
+ ia64_st8_inc_imm_hint (code, 1, 2, 255, IA64_ST_HINT_NONE);
+
+ ia64_st1_rel_inc_imm_hint (code, 1, 2, 255, IA64_ST_HINT_NONE);
+ ia64_st2_rel_inc_imm_hint (code, 1, 2, 255, IA64_ST_HINT_NONE);
+ ia64_st4_rel_inc_imm_hint (code, 1, 2, 255, IA64_ST_HINT_NONE);
+ ia64_st8_rel_inc_imm_hint (code, 1, 2, 255, IA64_ST_HINT_NONE);
+
+ ia64_st8_spill_inc_imm_hint (code, 1, 2, 255, IA64_ST_HINT_NONE);
+
+ ia64_ldfs_hint (code, 1, 2, 0);
+ ia64_ldfd_hint (code, 1, 2, 0);
+ ia64_ldf8_hint (code, 1, 2, 0);
+ ia64_ldfe_hint (code, 1, 2, 0);
+
+ ia64_ldfs_s_hint (code, 1, 2, 0);
+ ia64_ldfd_s_hint (code, 1, 2, 0);
+ ia64_ldf8_s_hint (code, 1, 2, 0);
+ ia64_ldfe_s_hint (code, 1, 2, 0);
+
+ ia64_ldfs_a_hint (code, 1, 2, 0);
+ ia64_ldfd_a_hint (code, 1, 2, 0);
+ ia64_ldf8_a_hint (code, 1, 2, 0);
+ ia64_ldfe_a_hint (code, 1, 2, 0);
+
+ ia64_ldfs_sa_hint (code, 1, 2, 0);
+ ia64_ldfd_sa_hint (code, 1, 2, 0);
+ ia64_ldf8_sa_hint (code, 1, 2, 0);
+ ia64_ldfe_sa_hint (code, 1, 2, 0);
+
+ ia64_ldfs_c_clr_hint (code, 1, 2, 0);
+ ia64_ldfd_c_clr_hint (code, 1, 2, 0);
+ ia64_ldf8_c_clr_hint (code, 1, 2, 0);
+ ia64_ldfe_c_clr_hint (code, 1, 2, 0);
+
+ ia64_ldfs_c_nc_hint (code, 1, 2, 0);
+ ia64_ldfd_c_nc_hint (code, 1, 2, 0);
+ ia64_ldf8_c_nc_hint (code, 1, 2, 0);
+ ia64_ldfe_c_nc_hint (code, 1, 2, 0);
+
+ ia64_ldf_fill_hint (code, 1, 2, 0);
+
+ ia64_ldfs_inc_hint (code, 1, 2, 3, 0);
+ ia64_ldfd_inc_hint (code, 1, 2, 3, 0);
+ ia64_ldf8_inc_hint (code, 1, 2, 3, 0);
+ ia64_ldfe_inc_hint (code, 1, 2, 3, 0);
+
+ ia64_ldfs_s_inc_hint (code, 1, 2, 3, 0);
+ ia64_ldfd_s_inc_hint (code, 1, 2, 3, 0);
+ ia64_ldf8_s_inc_hint (code, 1, 2, 3, 0);
+ ia64_ldfe_s_inc_hint (code, 1, 2, 3, 0);
+
+ ia64_ldfs_a_inc_hint (code, 1, 2, 3, 0);
+ ia64_ldfd_a_inc_hint (code, 1, 2, 3, 0);
+ ia64_ldf8_a_inc_hint (code, 1, 2, 3, 0);
+ ia64_ldfe_a_inc_hint (code, 1, 2, 3, 0);
+
+ ia64_ldfs_sa_inc_hint (code, 1, 2, 3, 0);
+ ia64_ldfd_sa_inc_hint (code, 1, 2, 3, 0);
+ ia64_ldf8_sa_inc_hint (code, 1, 2, 3, 0);
+ ia64_ldfe_sa_inc_hint (code, 1, 2, 3, 0);
+
+ ia64_ldfs_c_clr_inc_hint (code, 1, 2, 3, 0);
+ ia64_ldfd_c_clr_inc_hint (code, 1, 2, 3, 0);
+ ia64_ldf8_c_clr_inc_hint (code, 1, 2, 3, 0);
+ ia64_ldfe_c_clr_inc_hint (code, 1, 2, 3, 0);
+
+ ia64_ldfs_c_nc_inc_hint (code, 1, 2, 3, 0);
+ ia64_ldfd_c_nc_inc_hint (code, 1, 2, 3, 0);
+ ia64_ldf8_c_nc_inc_hint (code, 1, 2, 3, 0);
+ ia64_ldfe_c_nc_inc_hint (code, 1, 2, 3, 0);
+
+ ia64_ldf_fill_inc_hint (code, 1, 2, 3, 0);
+
+ ia64_ldfs_inc_imm_hint (code, 1, 2, 255, 0);
+ ia64_ldfd_inc_imm_hint (code, 1, 2, 255, 0);
+ ia64_ldf8_inc_imm_hint (code, 1, 2, 255, 0);
+ ia64_ldfe_inc_imm_hint (code, 1, 2, 255, 0);
+
+ ia64_ldfs_s_inc_imm_hint (code, 1, 2, 255, 0);
+ ia64_ldfd_s_inc_imm_hint (code, 1, 2, 255, 0);
+ ia64_ldf8_s_inc_imm_hint (code, 1, 2, 255, 0);
+ ia64_ldfe_s_inc_imm_hint (code, 1, 2, 255, 0);
+
+ ia64_ldfs_a_inc_imm_hint (code, 1, 2, 255, 0);
+ ia64_ldfd_a_inc_imm_hint (code, 1, 2, 255, 0);
+ ia64_ldf8_a_inc_imm_hint (code, 1, 2, 255, 0);
+ ia64_ldfe_a_inc_imm_hint (code, 1, 2, 255, 0);
+
+ ia64_ldfs_sa_inc_imm_hint (code, 1, 2, 255, 0);
+ ia64_ldfd_sa_inc_imm_hint (code, 1, 2, 255, 0);
+ ia64_ldf8_sa_inc_imm_hint (code, 1, 2, 255, 0);
+ ia64_ldfe_sa_inc_imm_hint (code, 1, 2, 255, 0);
+
+ ia64_ldfs_c_clr_inc_imm_hint (code, 1, 2, 255, 0);
+ ia64_ldfd_c_clr_inc_imm_hint (code, 1, 2, 255, 0);
+ ia64_ldf8_c_clr_inc_imm_hint (code, 1, 2, 255, 0);
+ ia64_ldfe_c_clr_inc_imm_hint (code, 1, 2, 255, 0);
+
+ ia64_ldfs_c_nc_inc_imm_hint (code, 1, 2, 255, 0);
+ ia64_ldfd_c_nc_inc_imm_hint (code, 1, 2, 255, 0);
+ ia64_ldf8_c_nc_inc_imm_hint (code, 1, 2, 255, 0);
+ ia64_ldfe_c_nc_inc_imm_hint (code, 1, 2, 255, 0);
+
+ ia64_ldf_fill_inc_imm_hint (code, 1, 2, 255, 0);
+
+ ia64_stfs_hint (code, 1, 2, 0);
+ ia64_stfd_hint (code, 1, 2, 0);
+ ia64_stf8_hint (code, 1, 2, 0);
+ ia64_stfe_hint (code, 1, 2, 0);
+
+ ia64_stf_spill_hint (code, 1, 2, 0);
+
+ ia64_stfs_inc_imm_hint (code, 1, 2, 255, 0);
+ ia64_stfd_inc_imm_hint (code, 1, 2, 255, 0);
+ ia64_stf8_inc_imm_hint (code, 1, 2, 255, 0);
+ ia64_stfe_inc_imm_hint (code, 1, 2, 255, 0);
+
+ ia64_stf_spill_inc_imm_hint (code, 1, 2, 255, 0);
+
+ ia64_ldfps_hint (code, 1, 2, 3, 0);
+ ia64_ldfpd_hint (code, 1, 2, 3, 0);
+ ia64_ldfp8_hint (code, 1, 2, 3, 0);
+
+ ia64_ldfps_s_hint (code, 1, 2, 3, 0);
+ ia64_ldfpd_s_hint (code, 1, 2, 3, 0);
+ ia64_ldfp8_s_hint (code, 1, 2, 3, 0);
+
+ ia64_ldfps_a_hint (code, 1, 2, 3, 0);
+ ia64_ldfpd_a_hint (code, 1, 2, 3, 0);
+ ia64_ldfp8_a_hint (code, 1, 2, 3, 0);
+
+ ia64_ldfps_sa_hint (code, 1, 2, 3, 0);
+ ia64_ldfpd_sa_hint (code, 1, 2, 3, 0);
+ ia64_ldfp8_sa_hint (code, 1, 2, 3, 0);
+
+ ia64_ldfps_c_clr_hint (code, 1, 2, 3, 0);
+ ia64_ldfpd_c_clr_hint (code, 1, 2, 3, 0);
+ ia64_ldfp8_c_clr_hint (code, 1, 2, 3, 0);
+
+ ia64_ldfps_c_nc_hint (code, 1, 2, 3, 0);
+ ia64_ldfpd_c_nc_hint (code, 1, 2, 3, 0);
+ ia64_ldfp8_c_nc_hint (code, 1, 2, 3, 0);
+
+ ia64_ldfps_inc_hint (code, 1, 2, 3, 0);
+ ia64_ldfpd_inc_hint (code, 1, 2, 3, 0);
+ ia64_ldfp8_inc_hint (code, 1, 2, 3, 0);
+
+ ia64_ldfps_s_inc_hint (code, 1, 2, 3, 0);
+ ia64_ldfpd_s_inc_hint (code, 1, 2, 3, 0);
+ ia64_ldfp8_s_inc_hint (code, 1, 2, 3, 0);
+
+ ia64_ldfps_a_inc_hint (code, 1, 2, 3, 0);
+ ia64_ldfpd_a_inc_hint (code, 1, 2, 3, 0);
+ ia64_ldfp8_a_inc_hint (code, 1, 2, 3, 0);
+
+ ia64_ldfps_sa_inc_hint (code, 1, 2, 3, 0);
+ ia64_ldfpd_sa_inc_hint (code, 1, 2, 3, 0);
+ ia64_ldfp8_sa_inc_hint (code, 1, 2, 3, 0);
+
+ ia64_ldfps_c_clr_inc_hint (code, 1, 2, 3, 0);
+ ia64_ldfpd_c_clr_inc_hint (code, 1, 2, 3, 0);
+ ia64_ldfp8_c_clr_inc_hint (code, 1, 2, 3, 0);
+
+ ia64_ldfps_c_nc_inc_hint (code, 1, 2, 3, 0);
+ ia64_ldfpd_c_nc_inc_hint (code, 1, 2, 3, 0);
+ ia64_ldfp8_c_nc_inc_hint (code, 1, 2, 3, 0);
+
+ ia64_lfetch_hint (code, 1, 0);
+ ia64_lfetch_excl_hint (code, 1, 0);
+ ia64_lfetch_fault_hint (code, 1, 0);
+ ia64_lfetch_fault_excl_hint (code, 1, 0);
+
+ ia64_lfetch_hint (code, 1, IA64_LFHINT_NT1);
+ ia64_lfetch_hint (code, 1, IA64_LFHINT_NT2);
+ ia64_lfetch_hint (code, 1, IA64_LFHINT_NTA);
+
+ ia64_lfetch_inc_hint (code, 1, 2, 0);
+ ia64_lfetch_excl_inc_hint (code, 1, 2, 0);
+ ia64_lfetch_fault_inc_hint (code, 1, 2, 0);
+ ia64_lfetch_fault_excl_inc_hint (code, 1, 2, 0);
+
+ ia64_lfetch_inc_imm_hint (code, 1, 255, 0);
+ ia64_lfetch_excl_inc_imm_hint (code, 1, 255, 0);
+ ia64_lfetch_fault_inc_imm_hint (code, 1, 255, 0);
+ ia64_lfetch_fault_excl_inc_imm_hint (code, 1, 255, 0);
+
+ ia64_cmpxchg1_acq_hint (code, 1, 2, 3, 0);
+ ia64_cmpxchg2_acq_hint (code, 1, 2, 3, 0);
+ ia64_cmpxchg4_acq_hint (code, 1, 2, 3, 0);
+ ia64_cmpxchg8_acq_hint (code, 1, 2, 3, 0);
+ ia64_cmpxchg1_rel_hint (code, 1, 2, 3, 0);
+ ia64_cmpxchg2_rel_hint (code, 1, 2, 3, 0);
+ ia64_cmpxchg4_rel_hint (code, 1, 2, 3, 0);
+ ia64_cmpxchg8_rel_hint (code, 1, 2, 3, 0);
+ ia64_cmpxchg16_acq_hint (code, 1, 2, 3, 0);
+ ia64_cmpxchg16_rel_hint (code, 1, 2, 3, 0);
+ ia64_xchg1_hint (code, 1, 2, 3, 0);
+ ia64_xchg2_hint (code, 1, 2, 3, 0);
+ ia64_xchg4_hint (code, 1, 2, 3, 0);
+ ia64_xchg8_hint (code, 1, 2, 3, 0);
+
+ ia64_fetchadd4_acq_hint (code, 1, 2, -16, 0);
+ ia64_fetchadd4_acq_hint (code, 1, 2, -8, 0);
+ ia64_fetchadd4_acq_hint (code, 1, 2, -4, 0);
+ ia64_fetchadd4_acq_hint (code, 1, 2, -1, 0);
+ ia64_fetchadd4_acq_hint (code, 1, 2, 1, 0);
+ ia64_fetchadd4_acq_hint (code, 1, 2, 4, 0);
+ ia64_fetchadd4_acq_hint (code, 1, 2, 8, 0);
+ ia64_fetchadd4_acq_hint (code, 1, 2, 16, 0);
+
+ ia64_fetchadd4_acq_hint (code, 1, 2, 16, 0);
+ ia64_fetchadd8_acq_hint (code, 1, 2, 16, 0);
+ ia64_fetchadd4_rel_hint (code, 1, 2, 16, 0);
+ ia64_fetchadd8_rel_hint (code, 1, 2, 16, 0);
+
+ ia64_setf_sig (code, 1, 2);
+ ia64_setf_exp (code, 1, 2);
+ ia64_setf_s (code, 1, 2);
+ ia64_setf_d (code, 1, 2);
+
+ ia64_getf_sig (code, 1, 2);
+ ia64_getf_exp (code, 1, 2);
+ ia64_getf_s (code, 1, 2);
+ ia64_getf_d (code, 1, 2);
+
+ ia64_chk_s_m (code, 1, 0);
+ ia64_chk_s_m (code, 1, 1);
+ ia64_chk_s_m (code, 1, -1);
+
+ ia64_chk_s_float_m (code, 1, 0);
+
+ ia64_chk_a_nc (code, 1, 0);
+ ia64_chk_a_nc (code, 1, 1);
+ ia64_chk_a_nc (code, 1, -1);
+
+ ia64_chk_a_nc (code, 1, 0);
+ ia64_chk_a_clr (code, 1, 0);
+
+ ia64_chk_a_nc_float (code, 1, 0);
+ ia64_chk_a_clr_float (code, 1, 0);
+
+ ia64_invala (code);
+ ia64_fwb (code);
+ ia64_mf (code);
+ ia64_mf_a (code);
+ ia64_srlz_d (code);
+ ia64_stlz_i (code);
+ ia64_sync_i (code);
+
+ ia64_flushrs (code);
+ ia64_loadrs (code);
+
+ ia64_invala_e (code, 1);
+ ia64_invala_e_float (code, 1);
+
+ ia64_fc (code, 1);
+ ia64_fc_i (code, 1);
+
+ ia64_mov_to_ar_m (code, 1, 1);
+
+ ia64_mov_to_ar_imm_m (code, 1, 127);
+
+ ia64_mov_from_ar_m (code, 1, 1);
+
+ ia64_mov_to_cr (code, 1, 2);
+
+ ia64_mov_from_cr (code, 1, 2);
+
+ ia64_alloc (code, 1, 3, 4, 5, 0);
+ ia64_alloc (code, 1, 3, 4, 5, 8);
+
+ ia64_mov_to_psr_l (code, 1);
+ ia64_mov_to_psr_um (code, 1);
+
+ ia64_mov_from_psr (code, 1);
+ ia64_mov_from_psr_um (code, 1);
+
+ ia64_break_m (code, 0x1234);
+ ia64_nop_m (code, 0x1234);
+ ia64_hint_m (code, 0x1234);
+
+ ia64_br_cond_hint (code, 0, 0, 0, 0);
+ ia64_br_wexit_hint (code, 0, 0, 0, 0);
+ ia64_br_wtop_hint (code, 0, 0, 0, 0);
+
+ ia64_br_cloop_hint (code, 0, 0, 0, 0);
+ ia64_br_cexit_hint (code, 0, 0, 0, 0);
+ ia64_br_ctop_hint (code, 0, 0, 0, 0);
+
+ ia64_br_call_hint (code, 1, 0, 0, 0, 0);
+
+ ia64_br_cond_reg_hint (code, 1, 0, 0, 0);
+ ia64_br_ia_reg_hint (code, 1, 0, 0, 0);
+ ia64_br_ret_reg_hint (code, 1, 0, 0, 0);
+
+ ia64_br_call_reg_hint (code, 1, 2, 0, 0, 0);
+
+ ia64_cover (code);
+ ia64_clrrrb (code);
+ ia64_clrrrb_pr (code);
+ ia64_rfi (code);
+ ia64_bsw_0 (code);
+ ia64_bsw_1 (code);
+ ia64_epc (code);
+
+ ia64_break_b (code, 0x1234);
+ ia64_nop_b (code, 0x1234);
+ ia64_hint_b (code, 0x1234);
+
+ ia64_break_x (code, 0x2123456789ABCDEFULL);
+
+ ia64_movl (code, 1, 0x123456789ABCDEF0LL);
+
+ ia64_brl_cond_hint (code, 0, 0, 0, 0);
+ ia64_brl_cond_hint (code, -1, 0, 0, 0);
+
+ ia64_brl_call_hint (code, 1, 0, 0, 0, 0);
+ ia64_brl_call_hint (code, 1, -1, 0, 0, 0);
+
+ ia64_nop_x (code, 0x2123456789ABCDEFULL);
+ ia64_hint_x (code, 0x2123456789ABCDEFULL);
+
+ ia64_movl_pred (code, 1, 1, 0x123456789ABCDEF0LL);
+
+ /* FLOATING-POINT */
+ ia64_fma_sf_pred (code, 1, 1, 2, 3, 4, 2);
+ ia64_fma_s_sf_pred (code, 1, 1, 2, 3, 4, 2);
+ ia64_fma_d_sf_pred (code, 1, 1, 2, 3, 4, 2);
+ ia64_fpma_sf_pred (code, 1, 1, 2, 3, 4, 2);
+ ia64_fms_sf_pred (code, 1, 1, 2, 3, 4, 2);
+ ia64_fms_s_sf_pred (code, 1, 1, 2, 3, 4, 2);
+ ia64_fms_d_sf_pred (code, 1, 1, 2, 3, 4, 2);
+ ia64_fpms_sf_pred (code, 1, 1, 2, 3, 4, 2);
+ ia64_fnma_sf_pred (code, 1, 1, 2, 3, 4, 2);
+ ia64_fnma_s_sf_pred (code, 1, 1, 2, 3, 4, 2);
+ ia64_fnma_d_sf_pred (code, 1, 1, 2, 3, 4, 2);
+ ia64_fpnma_sf_pred (code, 1, 1, 2, 3, 4, 2);
+
+ ia64_xma_l_pred (code, 1, 1, 2, 3, 4);
+ ia64_xma_h_pred (code, 1, 1, 2, 3, 4);
+ ia64_xma_hu_pred (code, 1, 1, 2, 3, 4);
+
+ ia64_fselect_pred (code, 1, 1, 2, 3, 4);
+
+ ia64_fcmp_eq_sf_pred (code, 1, 1, 2, 3, 4, 0);
+ ia64_fcmp_lt_sf_pred (code, 1, 1, 2, 3, 4, 0);
+ ia64_fcmp_le_sf_pred (code, 1, 1, 2, 3, 4, 0);
+ ia64_fcmp_unord_sf_pred (code, 1, 1, 2, 3, 4, 0);
+ ia64_fcmp_eq_unc_sf_pred (code, 1, 1, 2, 3, 4, 0);
+ ia64_fcmp_lt_unc_sf_pred (code, 1, 1, 2, 3, 4, 0);
+ ia64_fcmp_le_unc_sf_pred (code, 1, 1, 2, 3, 4, 0);
+ ia64_fcmp_unord_unc_sf_pred (code, 1, 1, 2, 3, 4, 0);
+
+ ia64_fclass_m_pred (code, 1, 1, 2, 3, 0x1ff);
+ ia64_fclass_m_unc_pred (code, 1, 1, 2, 3, 0x1ff);
+
+ ia64_frcpa_sf_pred (code, 1, 1, 2, 3, 4, 0);
+ ia64_fprcpa_sf_pred (code, 1, 1, 2, 3, 4, 0);
+
+ ia64_frsqrta_sf_pred (code, 1, 1, 2, 4, 0);
+ ia64_fprsqrta_sf_pred (code, 1, 1, 2, 4, 0);
+
+ ia64_fmin_sf_pred (code, 1, 2, 3, 4, 0);
+ ia64_fman_sf_pred (code, 1, 2, 3, 4, 0);
+ ia64_famin_sf_pred (code, 1, 2, 3, 4, 0);
+ ia64_famax_sf_pred (code, 1, 2, 3, 4, 0);
+ ia64_fpmin_sf_pred (code, 1, 2, 3, 4, 0);
+ ia64_fpman_sf_pred (code, 1, 2, 3, 4, 0);
+ ia64_fpamin_sf_pred (code, 1, 2, 3, 4, 0);
+ ia64_fpamax_sf_pred (code, 1, 2, 3, 4, 0);
+ ia64_fpcmp_eq_sf_pred (code, 1, 2, 3, 4, 0);
+ ia64_fpcmp_lt_sf_pred (code, 1, 2, 3, 4, 0);
+ ia64_fpcmp_le_sf_pred (code, 1, 2, 3, 4, 0);
+ ia64_fpcmp_unord_sf_pred (code, 1, 2, 3, 4, 0);
+ ia64_fpcmp_neq_sf_pred (code, 1, 2, 3, 4, 0);
+ ia64_fpcmp_nlt_sf_pred (code, 1, 2, 3, 4, 0);
+ ia64_fpcmp_nle_sf_pred (code, 1, 2, 3, 4, 0);
+ ia64_fpcmp_ord_sf_pred (code, 1, 2, 3, 4, 0);
+
+ ia64_fmerge_s_pred (code, 1, 2, 3, 4);
+ ia64_fmerge_ns_pred (code, 1, 2, 3, 4);
+ ia64_fmerge_se_pred (code, 1, 2, 3, 4);
+ ia64_fmix_lr_pred (code, 1, 2, 3, 4);
+ ia64_fmix_r_pred (code, 1, 2, 3, 4);
+ ia64_fmix_l_pred (code, 1, 2, 3, 4);
+ ia64_fsxt_r_pred (code, 1, 2, 3, 4);
+ ia64_fsxt_l_pred (code, 1, 2, 3, 4);
+ ia64_fpack_pred (code, 1, 2, 3, 4);
+ ia64_fswap_pred (code, 1, 2, 3, 4);
+ ia64_fswap_nl_pred (code, 1, 2, 3, 4);
+ ia64_fswap_nr_pred (code, 1, 2, 3, 4);
+ ia64_fand_pred (code, 1, 2, 3, 4);
+ ia64_fandcm_pred (code, 1, 2, 3, 4);
+ ia64_for_pred (code, 1, 2, 3, 4);
+ ia64_fxor_pred (code, 1, 2, 3, 4);
+ ia64_fpmerge_s_pred (code, 1, 2, 3, 4);
+ ia64_fpmerge_ns_pred (code, 1, 2, 3, 4);
+ ia64_fpmerge_se_pred (code, 1, 2, 3, 4);
+
+ ia64_fcvt_fx_sf_pred ((code), 1, 2, 3, 0);
+ ia64_fcvt_fxu_sf_pred ((code), 1, 2, 3, 0);
+ ia64_fcvt_fx_trunc_sf_pred ((code), 1, 2, 3, 0);
+ ia64_fcvt_fxu_trunc_sf_pred ((code), 1, 2, 3, 0);
+ ia64_fpcvt_fx_sf_pred ((code), 1, 2, 3, 0);
+ ia64_fpcvt_fxu_sf_pred ((code), 1, 2, 3, 0);
+ ia64_fpcvt_fx_trunc_sf_pred ((code), 1, 2, 3, 0);
+ ia64_fpcvt_fxu_trunc_sf_pred ((code), 1, 2, 3, 0);
+
+ ia64_fcvt_xf_pred ((code), 1, 2, 3);
+
+ ia64_fsetc_sf_pred ((code), 1, 0x33, 0x33, 3);
+
+ ia64_fclrf_sf_pred ((code), 1, 3);
+
+ ia64_fchkf_sf_pred ((code), 1, -1, 3);
+
+ ia64_break_f_pred ((code), 1, 0x1234);
+
+ ia64_movl (code, 31, -123456);
+
+ ia64_codegen_close (code);
+
+#if 0
+ /* disassembly */
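+	/* round-trip check: extract the first bundle's template and slots, re-emit them, and assert that the two 64-bit words are unchanged */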
+ {
+ guint8 *buf = code.buf;
+ int template;
+ guint64 dw1, dw2;
+ guint64 ins1, ins2, ins3;
+
+ ia64_break_i (code, 0x1234);
+
+ ia64_codegen_close (code);
+
+ dw1 = ((guint64*)buf) [0];
+ dw2 = ((guint64*)buf) [1];
+
+ template = ia64_bundle_template (buf);
+ ins1 = ia64_bundle_ins1 (buf);
+ ins2 = ia64_bundle_ins2 (buf);
+ ins3 = ia64_bundle_ins3 (buf);
+
+ code.buf = buf;
+ ia64_emit_bundle_template (&code, template, ins1, ins2, ins3);
+
+ g_assert (dw1 == ((guint64*)buf) [0]);
+ g_assert (dw2 == ((guint64*)buf) [1]);
+ }
+#endif
+
+ mono_disassemble_code (buf, 40960, "code");
+
+ return 0;
+}
diff --git a/lib/ffts/src/arch/ia64/ia64-codegen.h b/lib/ffts/src/arch/ia64/ia64-codegen.h
new file mode 100644
index 0000000..1793580
--- /dev/null
+++ b/lib/ffts/src/arch/ia64/ia64-codegen.h
@@ -0,0 +1,3183 @@
+/*
+ * ia64-codegen.h: Macros for generating ia64 code
+ *
+ * Authors:
+ * Zoltan Varga (vargaz@gmail.com)
+ *
+ * (C) 2005 Novell, Inc.
+ */
+
+#ifndef _IA64_CODEGEN_H_
+#define _IA64_CODEGEN_H_
+
+#include <glib.h>
+#include <string.h>
+
+#define UNW_LOCAL_ONLY
+#include <libunwind.h>
+
+typedef enum {
+ IA64_INS_TYPE_A,
+ IA64_INS_TYPE_I,
+ IA64_INS_TYPE_M,
+ IA64_INS_TYPE_F,
+ IA64_INS_TYPE_B,
+ IA64_INS_TYPE_LX
+} Ia64InsType;
+
+typedef enum {
+ IA64_TEMPLATE_MII = 0x00,
+ IA64_TEMPLATE_MIIS = 0x01,
+ IA64_TEMPLATE_MISI = 0x02,
+ IA64_TEMPLATE_MISIS = 0x03,
+ IA64_TEMPLATE_MLX = 0x04,
+ IA64_TEMPLATE_MLXS = 0x05,
+ IA64_TEMPLATE_UNUS1 = 0x06,
+ IA64_TEMPLATE_UNUS2 = 0x07,
+ IA64_TEMPLATE_MMI = 0x08,
+ IA64_TEMPLATE_MMIS = 0x09,
+ IA64_TEMPLATE_MSMI = 0x0A,
+ IA64_TEMPLATE_MSMIS = 0x0B,
+ IA64_TEMPLATE_MFI = 0x0C,
+ IA64_TEMPLATE_MFIS = 0x0D,
+ IA64_TEMPLATE_MMF = 0x0E,
+ IA64_TEMPLATE_MMFS = 0x0F,
+ IA64_TEMPLATE_MIB = 0x10,
+ IA64_TEMPLATE_MIBS = 0x11,
+ IA64_TEMPLATE_MBB = 0x12,
+ IA64_TEMPLATE_MBBS = 0x13,
+ IA64_TEMPLATE_UNUS3 = 0x14,
+ IA64_TEMPLATE_UNUS4 = 0x15,
+ IA64_TEMPLATE_BBB = 0x16,
+ IA64_TEMPLATE_BBBS = 0x17,
+ IA64_TEMPLATE_MMB = 0x18,
+ IA64_TEMPLATE_MMBS = 0x19,
+ IA64_TEMPLATE_UNUS5 = 0x1A,
+ IA64_TEMPLATE_UNUS6 = 0x1B,
+ IA64_TEMPLATE_MFB = 0x1C,
+ IA64_TEMPLATE_MFBS = 0x1D,
+ IA64_TEMPLATE_UNUS7 = 0x1E,
+ IA64_TEMPLATE_UNUS8 = 0x1F,
+} Ia64BundleTemplate;
+
+typedef enum {
+ IA64_R0 = 0,
+ IA64_R1 = 1,
+ IA64_R2 = 2,
+ IA64_R3 = 3,
+ IA64_R4 = 4,
+ IA64_R5 = 5,
+ IA64_R6 = 6,
+ IA64_R7 = 7,
+ IA64_R8 = 8,
+ IA64_R9 = 9,
+ IA64_R10 = 10,
+ IA64_R11 = 11,
+ IA64_R12 = 12,
+ IA64_R13 = 13,
+ IA64_R14 = 14,
+ IA64_R15 = 15,
+ IA64_R16 = 16,
+ IA64_R17 = 17,
+ IA64_R18 = 18,
+ IA64_R19 = 19,
+ IA64_R20 = 20,
+ IA64_R21 = 21,
+ IA64_R22 = 22,
+ IA64_R23 = 23,
+ IA64_R24 = 24,
+ IA64_R25 = 25,
+ IA64_R26 = 26,
+ IA64_R27 = 27,
+ IA64_R28 = 28,
+ IA64_R29 = 29,
+ IA64_R30 = 30,
+ IA64_R31 = 31,
+
+ /* Aliases */
+ IA64_GP = IA64_R1,
+ IA64_SP = IA64_R12,
+ IA64_TP = IA64_R13
+} Ia64GeneralRegister;
+
+typedef enum {
+ IA64_B0 = 0,
+ IA64_B1 = 1,
+ IA64_B2 = 2,
+ IA64_B3 = 3,
+ IA64_B4 = 4,
+ IA64_B5 = 5,
+ IA64_B6 = 6,
+ IA64_B7 = 7,
+
+ /* Aliases */
+ IA64_RP = IA64_B0
+} Ia64BranchRegister;
+
+typedef enum {
+ IA64_CCV = 32,
+ IA64_PFS = 64
+} Ia64ApplicationRegister;
+
+/* disassembly */
+#define ia64_bundle_template(code) ((*(guint64*)(gpointer)code) & 0x1f)
+#define ia64_bundle_ins1(code) (((*(guint64*)(gpointer)code) >> 5) & 0x1ffffffffff)
+#define ia64_bundle_ins2(code) (((*(guint64*)(gpointer)code) >> 46) | ((((guint64*)(gpointer)code)[1] & 0x7fffff) << 18))
+#define ia64_bundle_ins3(code) ((((guint64*)(gpointer)code)[1]) >> 23)
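+
+/*
+ * A bundle is 128 bits: a 5-bit template followed by three 41-bit
+ * instruction slots (5 + 3*41 = 128); the macros above undo this packing.
+ */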
+
+#define ia64_ins_opcode(ins) (((guint64)(ins)) >> 37)
+#define ia64_ins_qp(ins) (((guint64)(ins)) & 0x3f)
+#define ia64_ins_r1(ins) ((((guint64)(ins)) >> 6) & 0x7f)
+#define ia64_ins_r2(ins) ((((guint64)(ins)) >> 13) & 0x7f)
+#define ia64_ins_r3(ins) ((((guint64)(ins)) >> 20) & 0x7f)
+
+#define ia64_ins_b1(ins) ((((guint64)(ins)) >> 6) & 0x7)
+#define ia64_ins_b2(ins) ((((guint64)(ins)) >> 13) & 0x7)
+#define ia64_ins_btype(ins) ((((guint64)(ins)) >> 6) & 0x7)
+#define ia64_ins_x(ins) ((((guint64)(ins)) >> 22) & 0x1)
+#define ia64_ins_x2a(ins) ((((guint64)(ins)) >> 34) & 0x3)
+#define ia64_ins_x2b(ins) ((((guint64)(ins)) >> 27) & 0x3)
+#define ia64_ins_x3(ins) ((((guint64)(ins)) >> 33) & 0x7)
+#define ia64_ins_x4(ins) ((((guint64)(ins)) >> 29) & 0xf)
+#define ia64_ins_x6(ins) ((((guint64)(ins)) >> 27) & 0x3f)
+#define ia64_ins_y(ins) ((((guint64)(ins)) >> 26) & 0x1)
+#define ia64_ins_vc(ins) ((((guint64)(ins)) >> 20) & 0x1)
+#define ia64_ins_ve(ins) ((((guint64)(ins)) >> 33) & 0x1)
+
+#define IA64_NOP_I ((0x01 << 27))
+#define IA64_NOP_M ((0x01 << 27))
+#define IA64_NOP_B (((long)0x02 << 37))
+#define IA64_NOP_F ((0x01 << 27))
+#define IA64_NOP_X ((0x01 << 27))
+
+/*
+ * READ_PR_BRANCH and WRITE_PR_FLOAT make it possible to place a comparison
+ * and the branch that depends on it in the same instruction group.
+ */
+typedef enum {
+ IA64_READ_GR,
+ IA64_WRITE_GR,
+ IA64_READ_PR,
+ IA64_WRITE_PR,
+ IA64_READ_PR_BRANCH,
+ IA64_WRITE_PR_FLOAT,
+ IA64_READ_BR,
+ IA64_WRITE_BR,
+ IA64_READ_BR_BRANCH,
+ IA64_READ_FR,
+ IA64_WRITE_FR,
+ IA64_READ_AR,
+ IA64_WRITE_AR,
+ IA64_NO_STOP,
+ IA64_END_OF_INS,
+ IA64_NONE
+} Ia64Dependency;
+
+/*
+ * IA64 code cannot be emitted in the same way as code on other processors,
+ * since 3 instructions are combined into a bundle. This structure keeps
+ * track of already emitted instructions.
+ */
+
+#define IA64_INS_BUFFER_SIZE 4
+#define MAX_UNW_OPS 8
+
+typedef struct {
+ guint8 *buf;
+ guint one_ins_per_bundle : 1;
+ int nins, template, dep_info_pos, unw_op_pos, unw_op_count;
+ guint64 instructions [IA64_INS_BUFFER_SIZE];
+ int itypes [IA64_INS_BUFFER_SIZE];
+ guint8 *region_start;
+ guint8 dep_info [128];
+ unw_dyn_op_t unw_ops [MAX_UNW_OPS];
+ /* The index of the instruction to which the given unw op belongs */
+ guint8 unw_ops_pos [MAX_UNW_OPS];
+} Ia64CodegenState;
+
+#ifdef IA64_SIMPLE_EMIT_BUNDLE
+G_GNUC_UNUSED static void ia64_emit_bundle (Ia64CodegenState *code, gboolean flush);
+#else
+void ia64_emit_bundle (Ia64CodegenState *code, gboolean flush);
+#endif
+
+#define ia64_codegen_init(code, codegen_buf) do { \
+ code.buf = codegen_buf; \
+ code.region_start = code.buf; \
+ code.nins = 0; \
+ code.one_ins_per_bundle = 0; \
+ code.dep_info_pos = 0; \
+ code.unw_op_count = 0; \
+ code.unw_op_pos = 0; \
+} while (0)
+
+#define ia64_codegen_close(code) do { \
+ ia64_emit_bundle (&code, TRUE); \
+} while (0)
+
+#define ia64_begin_bundle(code) do { \
+ ia64_emit_bundle (&code, TRUE); \
+} while (0)
+
+#define ia64_codegen_set_one_ins_per_bundle(code, is_one) do { \
+ ia64_begin_bundle (code); \
+ code.one_ins_per_bundle = (is_one); \
+} while (0)
+
+#define ia64_begin_bundle_template(code, bundle_template) do { \
+ ia64_emit_bundle (&code, TRUE); \
+ code.template = (bundle_template); \
+} while (0)
+
+#define ia64_unw_save_reg(code, reg, dreg) do { \
+	g_assert (code.unw_op_count < MAX_UNW_OPS); \
+ code.unw_ops_pos [code.unw_op_count] = code.nins; \
+ _U_dyn_op_save_reg (&(code.unw_ops [code.unw_op_count ++]), _U_QP_TRUE, -1, reg, dreg); \
+} while (0)
+
+#define ia64_unw_add(code, reg, val) do { \
+	g_assert (code.unw_op_count < MAX_UNW_OPS); \
+ code.unw_ops_pos [code.unw_op_count] = code.nins; \
+ _U_dyn_op_add (&(code.unw_ops [code.unw_op_count ++]), _U_QP_TRUE, code.nins, reg, val); \
+} while (0)
+
+#define ia64_unw_pop_frames(code, nframes) do { \
+	g_assert (code.unw_op_count < MAX_UNW_OPS); \
+ code.unw_ops_pos [code.unw_op_count] = code.nins; \
+ _U_dyn_op_pop_frames (&(code.unw_ops [code.unw_op_count ++]), _U_QP_TRUE, code.nins, (nframes)); \
+} while (0)
+
+#define ia64_unw_label_state(code, id) do { \
+	g_assert (code.unw_op_count < MAX_UNW_OPS); \
+ code.unw_ops_pos [code.unw_op_count] = code.nins; \
+ _U_dyn_op_label_state (&(code.unw_ops [code.unw_op_count ++]), (id)); \
+} while (0)
+
+
+#define ia64_unw_copy_state(code, id) do { \
+	g_assert (code.unw_op_count < MAX_UNW_OPS); \
+ code.unw_ops_pos [code.unw_op_count] = code.nins; \
+ _U_dyn_op_copy_state (&(code.unw_ops [code.unw_op_count ++]), (id)); \
+} while (0)
+
+#if 0
+/* To ease debugging, emit instructions immediately */
+#define EMIT_BUNDLE(itype, code) if ((itype != IA64_INS_TYPE_LX) || (code.nins == 2)) ia64_emit_bundle (&code, FALSE);
+#else
+#define EMIT_BUNDLE(itype, code) if ((itype == IA64_INS_TYPE_LX) && (code.nins == 2)) ia64_emit_bundle (&code, FALSE);
+#endif
+
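+/*
+ * Queue one instruction plus its dependency-info terminator; the buffer is
+ * flushed once the second half of an LX (movl) pair is queued or when
+ * IA64_INS_BUFFER_SIZE instructions are pending.
+ */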
+#define ia64_emit_ins(code, itype, ins) do { \
+ code.instructions [code.nins] = ins; \
+ code.itypes [code.nins] = itype; \
+ code.nins ++; \
+ code.dep_info [code.dep_info_pos ++] = IA64_END_OF_INS; \
+ code.dep_info [code.dep_info_pos ++] = 0; \
+ EMIT_BUNDLE (itype, code); \
+ if (code.nins == IA64_INS_BUFFER_SIZE) \
+ ia64_emit_bundle (&code, FALSE); \
+} while (0)
+
+#define ia64_no_stop(code) do { \
+ code.dep_info [code.dep_info_pos ++] = IA64_NO_STOP; \
+ code.dep_info [code.dep_info_pos ++] = 0; \
+} while (0)
+
+#if G_BYTE_ORDER != G_LITTLE_ENDIAN
+#error "FIXME"
+#endif
+
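+/*
+ * Pack a 5-bit template and three 41-bit instruction slots into the two
+ * little-endian 64-bit words of a bundle.
+ */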
+#define ia64_emit_bundle_template(code, template, i1, i2, i3) do { \
+ guint64 *buf64 = (guint64*)(gpointer)(code)->buf; \
+ guint64 dw1, dw2; \
+ dw1 = (((guint64)(template)) & 0x1f) | ((guint64)(i1) << 5) | ((((guint64)(i2)) & 0x3ffff) << 46); \
+ dw2 = (((guint64)(i2)) >> 18) | (((guint64)(i3)) << 23); \
+ buf64[0] = dw1; \
+ buf64[1] = dw2; \
+ (code)->buf += 16; \
+} while (0)
+
+#ifdef IA64_SIMPLE_EMIT_BUNDLE
+
+G_GNUC_UNUSED static void
+ia64_emit_bundle (Ia64CodegenState *code, gboolean flush)
+{
+ int i;
+
+ for (i = 0; i < code->nins; ++i) {
+ switch (code->itypes [i]) {
+ case IA64_INS_TYPE_A:
+ ia64_emit_bundle_template (code, IA64_TEMPLATE_MIIS, code->instructions [i], IA64_NOP_I, IA64_NOP_I);
+ break;
+ case IA64_INS_TYPE_I:
+ ia64_emit_bundle_template (code, IA64_TEMPLATE_MIIS, IA64_NOP_M, code->instructions [i], IA64_NOP_I);
+ break;
+ case IA64_INS_TYPE_M:
+ ia64_emit_bundle_template (code, IA64_TEMPLATE_MIIS, code->instructions [i], IA64_NOP_I, IA64_NOP_I);
+ break;
+ case IA64_INS_TYPE_B:
+ ia64_emit_bundle_template (code, IA64_TEMPLATE_MIBS, IA64_NOP_M, IA64_NOP_I, code->instructions [i]);
+ break;
+ case IA64_INS_TYPE_F:
+ ia64_emit_bundle_template (code, IA64_TEMPLATE_MFIS, IA64_NOP_M, code->instructions [i], IA64_NOP_I);
+ break;
+ case IA64_INS_TYPE_LX:
+ ia64_emit_bundle_template (code, IA64_TEMPLATE_MLXS, IA64_NOP_M, code->instructions [i], code->instructions [i + 1]);
+ i ++;
+ break;
+ default:
+ g_assert_not_reached ();
+ }
+ }
+
+ code->nins = 0;
+ code->dep_info_pos = 0;
+}
+
+#endif /* IA64_SIMPLE_EMIT_BUNDLE */
+
+#define ia64_is_imm8(imm) (((gint64)(imm) >= -128) && ((gint64)(imm) <= 127))
+#define ia64_is_imm14(imm) (((gint64)(imm) >= -8192) && ((gint64)(imm) <= 8191))
+#define ia64_is_imm21(imm) (((gint64)(imm) >= -0x100000) && ((gint64)(imm) <= (0x100000 - 1)))
+
+#define ia64_is_adds_imm(imm) ia64_is_imm14((imm))
+
+#if 1
+
+#define check_assert(cond) g_assert((cond))
+
+#else
+
+#define check_assert(cond)
+
+#endif
+
+#define check_greg(gr) check_assert ((guint64)(gr) < 128)
+
+#define check_freg(fr) check_assert ((guint64)(fr) < 128)
+
+#define check_fr(fr) check_assert ((guint64)(fr) < 128)
+
+#define check_preg(pr) check_assert ((guint64)(pr) < 64)
+
+#define check_breg(pr) check_assert ((guint64)(pr) < 8)
+
+#define check_count2(count) check_assert (((count) >= 1) && ((count) <= 4))
+
+#define check_count5(count) check_assert (((count) >= 0) && ((count) < 32))
+
+#define check_count6(count) check_assert (((count) >= 0) && ((count) < 64))
+
+#define check_imm1(imm) check_assert (((gint64)(imm) >= -1) && ((gint64)(imm) <= 0))
+#define check_imm3(imm) check_assert (((gint64)(imm) >= -4) && ((gint64)(imm) <= 3))
+#define check_imm8(imm) check_assert (((gint64)(imm) >= -128) && ((gint64)(imm) <= 127))
+#define check_imm9(imm) check_assert (((gint64)(imm) >= -256) && ((gint64)(imm) <= 255))
+#define check_imm14(imm) check_assert (((gint64)(imm) >= -8192) && ((gint64)(imm) <= 8191))
+#define check_imm21(imm) check_assert (((gint64)(imm) >= -0x100000) && ((gint64)(imm) <= (0x100000 - 1)))
+#define check_imm22(imm) check_assert (((gint64)(imm) >= -0x200000) && ((gint64)(imm) <= (0x200000 - 1)))
+#define check_imm62(imm) check_assert (((gint64)(imm) >= -0x2000000000000000LL) && ((gint64)(imm) <= (0x2000000000000000LL - 1)))
+
+#define check_len4(len) check_assert (((gint64)(len) >= 1) && ((gint64)(len) <= 16))
+
+#define check_bwh(bwh) check_assert ((bwh) >= 0 && (bwh) <= IA64_BWH_DPNT)
+
+#define check_ph(ph) check_assert ((ph) >= 0 && (ph) <= IA64_PH_MANY)
+
+#define check_dh(dh) check_assert ((dh) >= 0 && (dh) <= IA64_DH_CLR)
+
+#define check_sf(sf) check_assert ((sf) >= 0 && (sf) <= 3)
+
+#define sign_bit(imm) ((gint64)(imm) < 0 ? 1 : 0)
+
+/* Dependency info */
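+/*
+ * dep_info is a byte stream of (kind, register) pairs; ia64_emit_ins ()
+ * terminates each instruction's entries with IA64_END_OF_INS.
+ */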
+#define read_gr(code, gr) do { \
+ check_greg ((gr)); \
+ code.dep_info [code.dep_info_pos ++] = IA64_READ_GR; \
+ code.dep_info [code.dep_info_pos ++] = gr; \
+} while (0)
+
+#define write_gr(code, gr) do { \
+ check_greg ((gr)); \
+ code.dep_info [code.dep_info_pos ++] = IA64_WRITE_GR; \
+ code.dep_info [code.dep_info_pos ++] = gr; \
+} while (0)
+
+#define read_pr(code,pr) do { \
+ if ((pr) != 0) { \
+ check_preg ((pr)); \
+ code.dep_info [code.dep_info_pos ++] = IA64_READ_PR; \
+ code.dep_info [code.dep_info_pos ++] = (pr); \
+ } \
+} while (0)
+
+#define write_pr(code,pr) do { \
+ if ((pr) != 0) { \
+ check_preg ((pr)); \
+ code.dep_info [code.dep_info_pos ++] = IA64_WRITE_PR; \
+ code.dep_info [code.dep_info_pos ++] = (pr); \
+ } \
+} while (0)
+
+#define read_pr_branch(code,reg) do { \
+ check_preg ((reg)); \
+ code.dep_info [code.dep_info_pos ++] = IA64_READ_PR_BRANCH; \
+ code.dep_info [code.dep_info_pos ++] = (reg); \
+} while (0)
+
+#define write_pr_fp(code,reg) do { \
+ check_preg ((reg)); \
+ code.dep_info [code.dep_info_pos ++] = IA64_WRITE_PR_FLOAT; \
+ code.dep_info [code.dep_info_pos ++] = (reg); \
+} while (0)
+
+#define read_br(code,reg) do { \
+ check_breg ((reg)); \
+ code.dep_info [code.dep_info_pos ++] = IA64_READ_BR; \
+ code.dep_info [code.dep_info_pos ++] = (reg); \
+} while (0)
+
+#define write_br(code,reg) do { \
+ check_breg ((reg)); \
+ code.dep_info [code.dep_info_pos ++] = IA64_WRITE_BR; \
+ code.dep_info [code.dep_info_pos ++] = (reg); \
+} while (0)
+
+#define read_br_branch(code,reg) do { \
+ check_breg ((reg)); \
+ code.dep_info [code.dep_info_pos ++] = IA64_READ_BR_BRANCH; \
+ code.dep_info [code.dep_info_pos ++] = (reg); \
+} while (0)
+
+#define read_fr(code,reg) do { \
+ check_freg ((reg)); \
+ code.dep_info [code.dep_info_pos ++] = IA64_READ_FR; \
+ code.dep_info [code.dep_info_pos ++] = (reg); \
+} while (0)
+
+#define write_fr(code,reg) do { \
+ check_freg ((reg)); \
+ code.dep_info [code.dep_info_pos ++] = IA64_WRITE_FR; \
+ code.dep_info [code.dep_info_pos ++] = (reg); \
+} while (0)
+
+#define read_ar(code,reg) do { \
+ code.dep_info [code.dep_info_pos ++] = IA64_READ_AR; \
+ code.dep_info [code.dep_info_pos ++] = (reg); \
+} while (0)
+
+#define write_ar(code,reg) do { \
+ code.dep_info [code.dep_info_pos ++] = IA64_WRITE_AR; \
+ code.dep_info [code.dep_info_pos ++] = (reg); \
+} while (0)
+
+#define ia64_emit_ins_1(code,itype,f1,o1) ia64_emit_ins ((code), (itype), (((guint64)(f1) << (o1))))
+
+#define ia64_emit_ins_3(code,itype,f1,o1,f2,o2,f3,o3) ia64_emit_ins ((code), (itype), (((guint64)(f1) << (o1)) | ((guint64)(f2) << (o2)) | ((guint64)(f3) << (o3))))
+
+#define ia64_emit_ins_5(code,itype,f1,o1,f2,o2,f3,o3,f4,o4,f5,o5) ia64_emit_ins ((code), (itype), (((guint64)(f1) << (o1)) | ((guint64)(f2) << (o2)) | ((guint64)(f3) << (o3)) | ((guint64)(f4) << (o4)) | ((guint64)(f5) << (o5))))
+
+#define ia64_emit_ins_6(code,itype,f1,o1,f2,o2,f3,o3,f4,o4,f5,o5,f6,o6) ia64_emit_ins ((code), (itype), (((guint64)(f1) << (o1)) | ((guint64)(f2) << (o2)) | ((guint64)(f3) << (o3)) | ((guint64)(f4) << (o4)) | ((guint64)(f5) << (o5)) | ((guint64)(f6) << (o6))))
+
+#define ia64_emit_ins_7(code,itype,f1,o1,f2,o2,f3,o3,f4,o4,f5,o5,f6,o6,f7,o7) ia64_emit_ins ((code), (itype), (((guint64)(f1) << (o1)) | ((guint64)(f2) << (o2)) | ((guint64)(f3) << (o3)) | ((guint64)(f4) << (o4)) | ((guint64)(f5) << (o5)) | ((guint64)(f6) << (o6)) | ((guint64)(f7) << (o7))))
+
+#define ia64_emit_ins_8(code,itype,f1,o1,f2,o2,f3,o3,f4,o4,f5,o5,f6,o6,f7,o7,f8,o8) ia64_emit_ins ((code), (itype), (((guint64)(f1) << (o1)) | ((guint64)(f2) << (o2)) | ((guint64)(f3) << (o3)) | ((guint64)(f4) << (o4)) | ((guint64)(f5) << (o5)) | ((guint64)(f6) << (o6)) | ((guint64)(f7) << (o7)) | ((guint64)(f8) << (o8))))
+
+#define ia64_emit_ins_9(code,itype,f1,o1,f2,o2,f3,o3,f4,o4,f5,o5,f6,o6,f7,o7,f8,o8,f9,o9) ia64_emit_ins ((code), (itype), (((guint64)(f1) << (o1)) | ((guint64)(f2) << (o2)) | ((guint64)(f3) << (o3)) | ((guint64)(f4) << (o4)) | ((guint64)(f5) << (o5)) | ((guint64)(f6) << (o6)) | ((guint64)(f7) << (o7)) | ((guint64)(f8) << (o8)) | ((guint64)(f9) << (o9))))
+
+#define ia64_emit_ins_10(code,itype,f1,o1,f2,o2,f3,o3,f4,o4,f5,o5,f6,o6,f7,o7,f8,o8,f9,o9,f10,o10) ia64_emit_ins ((code), (itype), (((guint64)(f1) << (o1)) | ((guint64)(f2) << (o2)) | ((guint64)(f3) << (o3)) | ((guint64)(f4) << (o4)) | ((guint64)(f5) << (o5)) | ((guint64)(f6) << (o6)) | ((guint64)(f7) << (o7)) | ((guint64)(f8) << (o8)) | ((guint64)(f9) << (o9)) | ((guint64)(f10) << (o10))))
+
+#define ia64_emit_ins_11(code,itype,f1,o1,f2,o2,f3,o3,f4,o4,f5,o5,f6,o6,f7,o7,f8,o8,f9,o9,f10,o10,f11,o11) ia64_emit_ins ((code), (itype), (((guint64)(f1) << (o1)) | ((guint64)(f2) << (o2)) | ((guint64)(f3) << (o3)) | ((guint64)(f4) << (o4)) | ((guint64)(f5) << (o5)) | ((guint64)(f6) << (o6)) | ((guint64)(f7) << (o7)) | ((guint64)(f8) << (o8)) | ((guint64)(f9) << (o9)) | ((guint64)(f10) << (o10)) | ((guint64)(f11) << (o11))))
+
+/*
+ * A-Unit instructions
+ */
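+
+/*
+ * Each ia64_aN macro first records dependency info for its operands and
+ * then packs the instruction fields at their bit offsets (qp at bit 0,
+ * r1 at 6, r2 at 13, r3 at 20, major opcode at 37).
+ */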
+
+#define ia64_a1(code, qp, r1, r2, r3, x2a, ve, x4, x2b) do { read_pr ((code), (qp)); write_gr ((code), (r1)); read_gr ((code), (r2)); read_gr ((code), (r3)); ia64_emit_ins_9 ((code), IA64_INS_TYPE_A, (qp), 0, (r1), 6, (r2), 13, (r3), 20, (x2b), 27, (x4), 29, (ve), 33, (x2a), 34, (8), 37); } while (0)
+
+#define ia64_add_pred(code, qp, r1, r2, r3) ia64_a1 ((code), (qp), r1, r2, r3, 0, 0, 0, 0)
+#define ia64_add1_pred(code, qp, r1, r2, r3) ia64_a1 ((code), (qp), r1, r2, r3, 0, 0, 0, 1)
+#define ia64_sub_pred(code, qp, r1, r2, r3) ia64_a1 ((code), (qp), r1, r2, r3, 0, 0, 1, 1)
+#define ia64_sub1_pred(code, qp, r1, r2, r3) ia64_a1 ((code), (qp), r1, r2, r3, 0, 0, 1, 0)
+#define ia64_addp4_pred(code, qp, r1, r2, r3) ia64_a1 ((code), (qp), r1, r2, r3, 0, 0, 2, 0)
+#define ia64_and_pred(code, qp, r1, r2, r3) ia64_a1 ((code), (qp), r1, r2, r3, 0, 0, 3, 0)
+#define ia64_andcm_pred(code, qp, r1, r2, r3) ia64_a1 ((code), (qp), r1, r2, r3, 0, 0, 3, 1)
+#define ia64_or_pred(code, qp, r1, r2, r3) ia64_a1 ((code), (qp), r1, r2, r3, 0, 0, 3, 2)
+#define ia64_xor_pred(code, qp, r1, r2, r3) ia64_a1 ((code), (qp), r1, r2, r3, 0, 0, 3, 3)
+
+#define ia64_a2(code, qp, r1, r2, r3, x2a, ve, x4, ct2d) do { read_pr ((code), (qp)); write_gr ((code), (r1)); read_gr ((code), (r2)); read_gr ((code), (r3)); check_count2 (ct2d); ia64_emit_ins_9 ((code), IA64_INS_TYPE_A, (qp), 0, (r1), 6, (r2), 13, (r3), 20, (ct2d - 1), 27, (x4), 29, (ve), 33, (x2a), 34, (8), 37); } while (0)
+
+#define ia64_shladd_pred(code, qp, r1, r2, r3,count) ia64_a2 ((code), (qp), r1, r2, r3, 0, 0, 4, (count))
+#define ia64_shladdp4_pred(code, qp, r1, r2, r3,count) ia64_a2 ((code), (qp), r1, r2, r3, 0, 0, 6, (count))
+
+#define ia64_a3(code, qp, r1, imm8, r3, x2a, ve, x4, x2b) do { read_pr ((code), (qp)); write_gr ((code), (r1)); read_gr ((code), (r3)); check_imm8 ((imm8)); ia64_emit_ins_10 ((code), IA64_INS_TYPE_A, (qp), 0, (r1), 6, (imm8) & 0x7f, 13, (r3), 20, (x2b), 27, (x4), 29, (ve), 33, (x2a), 34, sign_bit((imm8)), 36, (8), 37); } while (0)
+
+#define ia64_sub_imm_pred(code, qp,r1,imm8,r3) ia64_a3 ((code), (qp), (r1), (imm8), (r3), 0, 0, 9, 1)
+#define ia64_and_imm_pred(code, qp,r1,imm8,r3) ia64_a3 ((code), (qp), (r1), (imm8), (r3), 0, 0, 0xb, 0)
+#define ia64_andcm_imm_pred(code, qp,r1,imm8,r3) ia64_a3 ((code), (qp), (r1), (imm8), (r3), 0, 0, 0xb, 1)
+#define ia64_or_imm_pred(code, qp,r1,imm8,r3) ia64_a3 ((code), (qp), (r1), (imm8), (r3), 0, 0, 0xb, 2)
+#define ia64_xor_imm_pred(code, qp,r1,imm8,r3) ia64_a3 ((code), (qp), (r1), (imm8), (r3), 0, 0, 0xb, 3)
+
+#define ia64_a4(code, qp, r1, imm14, r3, x2a, ve) do { read_pr ((code), (qp)); write_gr ((code), (r1)); read_gr ((code), (r3)); check_imm14 ((imm14)); ia64_emit_ins_9 ((code), IA64_INS_TYPE_A, (qp), 0, (r1), 6, ((imm14) & 0x7f), 13, (r3), 20, (((guint64)(imm14) >> 7) & 0x3f), 27, (ve), 33, (x2a), 34, sign_bit ((imm14)), 36, (8), 37); } while (0)
+
+#define ia64_adds_imm_pred(code, qp,r1,imm14,r3) ia64_a4 ((code), (qp), (r1), (imm14), (r3), 2, 0)
+#define ia64_addp4_imm_pred(code, qp,r1,imm14,r3) ia64_a4 ((code), (qp), (r1), (imm14), (r3), 3, 0)
+
+#define ia64_a5(code, qp, r1, imm, r3) do { read_pr ((code), (qp)); write_gr ((code), (r1)); read_gr ((code), (r3)); check_assert ((r3) < 4); check_imm22 ((imm)); ia64_emit_ins_8 ((code), IA64_INS_TYPE_A, (qp), 0, (r1), 6, ((guint64)(imm) & 0x7f), 13, (r3), 20, (((guint64)(imm) >> 12) & 0x1f), 22, (((guint64)(imm) >> 7) & 0x1ff), 27, sign_bit ((imm)), 36, (9), 37); } while (0)
+
+#define ia64_addl_imm_pred(code, qp, r1, imm22, r3) ia64_a5 ((code), (qp), (r1), (imm22), (r3))
+
+#define ia64_a6(code, qp, p1, p2, r2, r3, opcode, x2, tb, ta, c) do { read_pr ((code), (qp)); write_pr ((code), (p1)); write_pr ((code), (p2)); read_gr ((code), (r2)); read_gr ((code), (r3)); ia64_emit_ins_10 ((code), IA64_INS_TYPE_A, (qp), 0, (p1), 6, (c), 12, (r2), 13, (r3), 20, (p2), 27, (ta), 33, (x2), 34, (tb), 36, (opcode), 37); } while (0)
+
+#define ia64_cmp_lt_pred(code, qp, p1, p2, r2, r3) ia64_a6 ((code), (qp), (p1), (p2), (r2), (r3), 0xc, 0, 0, 0, 0)
+#define ia64_cmp_ltu_pred(code, qp, p1, p2, r2, r3) ia64_a6 ((code), (qp), (p1), (p2), (r2), (r3), 0xd, 0, 0, 0, 0)
+#define ia64_cmp_eq_pred(code, qp, p1, p2, r2, r3) ia64_a6 ((code), (qp), (p1), (p2), (r2), (r3), 0xe, 0, 0, 0, 0)
+#define ia64_cmp_lt_unc_pred(code, qp, p1, p2, r2, r3) ia64_a6 ((code), (qp), (p1), (p2), (r2), (r3), 0xc, 0, 0, 0, 1)
+#define ia64_cmp_ltu_unc_pred(code, qp, p1, p2, r2, r3) ia64_a6 ((code), (qp), (p1), (p2), (r2), (r3), 0xd, 0, 0, 0, 1)
+#define ia64_cmp_eq_unc_pred(code, qp, p1, p2, r2, r3) ia64_a6 ((code), (qp), (p1), (p2), (r2), (r3), 0xe, 0, 0, 0, 1)
+#define ia64_cmp_eq_and_pred(code, qp, p1, p2, r2, r3) ia64_a6 ((code), (qp), (p1), (p2), (r2), (r3), 0xc, 0, 0, 1, 0)
+#define ia64_cmp_eq_or_pred(code, qp, p1, p2, r2, r3) ia64_a6 ((code), (qp), (p1), (p2), (r2), (r3), 0xd, 0, 0, 1, 0)
+#define ia64_cmp_eq_or_andcm_pred(code, qp, p1, p2, r2, r3) ia64_a6 ((code), (qp), (p1), (p2), (r2), (r3), 0xe, 0, 0, 1, 0)
+#define ia64_cmp_ne_and_pred(code, qp, p1, p2, r2, r3) ia64_a6 ((code), (qp), (p1), (p2), (r2), (r3), 0xc, 0, 0, 1, 1)
+#define ia64_cmp_ne_or_pred(code, qp, p1, p2, r2, r3) ia64_a6 ((code), (qp), (p1), (p2), (r2), (r3), 0xd, 0, 0, 1, 1)
+#define ia64_cmp_ne_or_andcm_pred(code, qp, p1, p2, r2, r3) ia64_a6 ((code), (qp), (p1), (p2), (r2), (r3), 0xe, 0, 0, 1, 1)
+
+#define ia64_cmp4_lt_pred(code, qp, p1, p2, r2, r3) ia64_a6 ((code), (qp), (p1), (p2), (r2), (r3), 0xc, 1, 0, 0, 0)
+#define ia64_cmp4_ltu_pred(code, qp, p1, p2, r2, r3) ia64_a6 ((code), (qp), (p1), (p2), (r2), (r3), 0xd, 1, 0, 0, 0)
+#define ia64_cmp4_eq_pred(code, qp, p1, p2, r2, r3) ia64_a6 ((code), (qp), (p1), (p2), (r2), (r3), 0xe, 1, 0, 0, 0)
+#define ia64_cmp4_lt_unc_pred(code, qp, p1, p2, r2, r3) ia64_a6 ((code), (qp), (p1), (p2), (r2), (r3), 0xc, 1, 0, 0, 1)
+#define ia64_cmp4_ltu_unc_pred(code, qp, p1, p2, r2, r3) ia64_a6 ((code), (qp), (p1), (p2), (r2), (r3), 0xd, 1, 0, 0, 1)
+#define ia64_cmp4_eq_unc_pred(code, qp, p1, p2, r2, r3) ia64_a6 ((code), (qp), (p1), (p2), (r2), (r3), 0xe, 1, 0, 0, 1)
+#define ia64_cmp4_eq_and_pred(code, qp, p1, p2, r2, r3) ia64_a6 ((code), (qp), (p1), (p2), (r2), (r3), 0xc, 1, 0, 1, 0)
+#define ia64_cmp4_eq_or_pred(code, qp, p1, p2, r2, r3) ia64_a6 ((code), (qp), (p1), (p2), (r2), (r3), 0xd, 1, 0, 1, 0)
+#define ia64_cmp4_eq_or_andcm_pred(code, qp, p1, p2, r2, r3) ia64_a6 ((code), (qp), (p1), (p2), (r2), (r3), 0xe, 1, 0, 1, 0)
+#define ia64_cmp4_ne_and_pred(code, qp, p1, p2, r2, r3) ia64_a6 ((code), (qp), (p1), (p2), (r2), (r3), 0xc, 1, 0, 1, 1)
+#define ia64_cmp4_ne_or_pred(code, qp, p1, p2, r2, r3) ia64_a6 ((code), (qp), (p1), (p2), (r2), (r3), 0xd, 1, 0, 1, 1)
+#define ia64_cmp4_ne_or_andcm_pred(code, qp, p1, p2, r2, r3) ia64_a6 ((code), (qp), (p1), (p2), (r2), (r3), 0xe, 1, 0, 1, 1)
+
+/* Pseudo ops */
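+/* The relations below need no encodings of their own: gt/ge/le and their
+   unsigned forms reduce to lt/ltu by swapping the source registers (a > b
+   <=> b < a) and/or the target predicates (a >= b <=> !(a < b), so the
+   result lands in the complementary predicate). */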
+#define ia64_cmp_ne_pred(code, qp, p1, p2, r2, r3) ia64_cmp_eq_pred ((code), (qp), (p2), (p1), (r2), (r3))
+#define ia64_cmp_le_pred(code, qp, p1, p2, r2, r3) ia64_cmp_lt_pred ((code), (qp), (p2), (p1), (r3), (r2))
+#define ia64_cmp_gt_pred(code, qp, p1, p2, r2, r3) ia64_cmp_lt_pred ((code), (qp), (p1), (p2), (r3), (r2))
+#define ia64_cmp_ge_pred(code, qp, p1, p2, r2, r3) ia64_cmp_lt_pred ((code), (qp), (p2), (p1), (r2), (r3))
+#define ia64_cmp_leu_pred(code, qp, p1, p2, r2, r3) ia64_cmp_ltu_pred ((code), (qp), (p2), (p1), (r3), (r2))
+#define ia64_cmp_gtu_pred(code, qp, p1, p2, r2, r3) ia64_cmp_ltu_pred ((code), (qp), (p1), (p2), (r3), (r2))
+#define ia64_cmp_geu_pred(code, qp, p1, p2, r2, r3) ia64_cmp_ltu_pred ((code), (qp), (p2), (p1), (r2), (r3))
+
+#define ia64_cmp4_ne_pred(code, qp, p1, p2, r2, r3) ia64_cmp4_eq_pred ((code), (qp), (p2), (p1), (r2), (r3))
+#define ia64_cmp4_le_pred(code, qp, p1, p2, r2, r3) ia64_cmp4_lt_pred ((code), (qp), (p2), (p1), (r3), (r2))
+#define ia64_cmp4_gt_pred(code, qp, p1, p2, r2, r3) ia64_cmp4_lt_pred ((code), (qp), (p1), (p2), (r3), (r2))
+#define ia64_cmp4_ge_pred(code, qp, p1, p2, r2, r3) ia64_cmp4_lt_pred ((code), (qp), (p2), (p1), (r2), (r3))
+#define ia64_cmp4_leu_pred(code, qp, p1, p2, r2, r3) ia64_cmp4_ltu_pred ((code), (qp), (p2), (p1), (r3), (r2))
+#define ia64_cmp4_gtu_pred(code, qp, p1, p2, r2, r3) ia64_cmp4_ltu_pred ((code), (qp), (p1), (p2), (r3), (r2))
+#define ia64_cmp4_geu_pred(code, qp, p1, p2, r2, r3) ia64_cmp4_ltu_pred ((code), (qp), (p2), (p1), (r2), (r3))
+
+#define ia64_a7(code, qp, p1, p2, r2, r3, opcode, x2, tb, ta, c) do { read_pr ((code), (qp)); write_pr ((code), (p1)); write_pr ((code), (p2)); read_gr ((code), (r2)); read_gr ((code), (r3)); check_assert ((r2) == 0); ia64_emit_ins_10 ((code), IA64_INS_TYPE_A, (qp), 0, (p1), 6, (c), 12, (r2), 13, (r3), 20, (p2), 27, (ta), 33, (x2), 34, (tb), 36, (opcode), 37); } while (0)
+
+#define ia64_cmp_gt_and_pred(code, qp, p1, p2, r2, r3) ia64_a7 ((code), (qp), (p1), (p2), (r2), (r3), 0xc, 0, 1, 0, 0)
+#define ia64_cmp_gt_or_pred(code, qp, p1, p2, r2, r3) ia64_a7 ((code), (qp), (p1), (p2), (r2), (r3), 0xd, 0, 1, 0, 0)
+#define ia64_cmp_gt_or_andcm_pred(code, qp, p1, p2, r2, r3) ia64_a7 ((code), (qp), (p1), (p2), (r2), (r3), 0xe, 0, 1, 0, 0)
+#define ia64_cmp_le_and_pred(code, qp, p1, p2, r2, r3) ia64_a7 ((code), (qp), (p1), (p2), (r2), (r3), 0xc, 0, 1, 0, 1)
+#define ia64_cmp_le_or_pred(code, qp, p1, p2, r2, r3) ia64_a7 ((code), (qp), (p1), (p2), (r2), (r3), 0xd, 0, 1, 0, 1)
+#define ia64_cmp_le_or_andcm_pred(code, qp, p1, p2, r2, r3) ia64_a7 ((code), (qp), (p1), (p2), (r2), (r3), 0xe, 0, 1, 0, 1)
+#define ia64_cmp_ge_and_pred(code, qp, p1, p2, r2, r3) ia64_a7 ((code), (qp), (p1), (p2), (r2), (r3), 0xc, 0, 1, 1, 0)
+#define ia64_cmp_ge_or_pred(code, qp, p1, p2, r2, r3) ia64_a7 ((code), (qp), (p1), (p2), (r2), (r3), 0xd, 0, 1, 1, 0)
+#define ia64_cmp_ge_or_andcm_pred(code, qp, p1, p2, r2, r3) ia64_a7 ((code), (qp), (p1), (p2), (r2), (r3), 0xe, 0, 1, 1, 0)
+#define ia64_cmp_lt_and_pred(code, qp, p1, p2, r2, r3) ia64_a7 ((code), (qp), (p1), (p2), (r2), (r3), 0xc, 0, 1, 1, 1)
+#define ia64_cmp_lt_or_pred(code, qp, p1, p2, r2, r3) ia64_a7 ((code), (qp), (p1), (p2), (r2), (r3), 0xd, 0, 1, 1, 1)
+#define ia64_cmp_lt_or_andcm_pred(code, qp, p1, p2, r2, r3) ia64_a7 ((code), (qp), (p1), (p2), (r2), (r3), 0xe, 0, 1, 1, 1)
+
+#define ia64_cmp4_gt_and_pred(code, qp, p1, p2, r2, r3) ia64_a7 ((code), (qp), (p1), (p2), (r2), (r3), 0xc, 1, 1, 0, 0)
+#define ia64_cmp4_gt_or_pred(code, qp, p1, p2, r2, r3) ia64_a7 ((code), (qp), (p1), (p2), (r2), (r3), 0xd, 1, 1, 0, 0)
+#define ia64_cmp4_gt_or_andcm_pred(code, qp, p1, p2, r2, r3) ia64_a7 ((code), (qp), (p1), (p2), (r2), (r3), 0xe, 1, 1, 0, 0)
+#define ia64_cmp4_le_and_pred(code, qp, p1, p2, r2, r3) ia64_a7 ((code), (qp), (p1), (p2), (r2), (r3), 0xc, 1, 1, 0, 1)
+#define ia64_cmp4_le_or_pred(code, qp, p1, p2, r2, r3) ia64_a7 ((code), (qp), (p1), (p2), (r2), (r3), 0xd, 1, 1, 0, 1)
+#define ia64_cmp4_le_or_andcm_pred(code, qp, p1, p2, r2, r3) ia64_a7 ((code), (qp), (p1), (p2), (r2), (r3), 0xe, 1, 1, 0, 1)
+#define ia64_cmp4_ge_and_pred(code, qp, p1, p2, r2, r3) ia64_a7 ((code), (qp), (p1), (p2), (r2), (r3), 0xc, 1, 1, 1, 0)
+#define ia64_cmp4_ge_or_pred(code, qp, p1, p2, r2, r3) ia64_a7 ((code), (qp), (p1), (p2), (r2), (r3), 0xd, 1, 1, 1, 0)
+#define ia64_cmp4_ge_or_andcm_pred(code, qp, p1, p2, r2, r3) ia64_a7 ((code), (qp), (p1), (p2), (r2), (r3), 0xe, 1, 1, 1, 0)
+#define ia64_cmp4_lt_and_pred(code, qp, p1, p2, r2, r3) ia64_a7 ((code), (qp), (p1), (p2), (r2), (r3), 0xc, 1, 1, 1, 1)
+#define ia64_cmp4_lt_or_pred(code, qp, p1, p2, r2, r3) ia64_a7 ((code), (qp), (p1), (p2), (r2), (r3), 0xd, 1, 1, 1, 1)
+#define ia64_cmp4_lt_or_andcm_pred(code, qp, p1, p2, r2, r3) ia64_a7 ((code), (qp), (p1), (p2), (r2), (r3), 0xe, 1, 1, 1, 1)
+
+#define ia64_a8(code, qp, p1, p2, imm, r3, opcode, x2, ta, c) do { read_pr ((code), (qp)); write_pr ((code), (p1)); write_pr ((code), (p2)); read_gr ((code), (r3)); check_imm8 ((imm)); ia64_emit_ins_10 ((code), IA64_INS_TYPE_A, (qp), 0, (p1), 6, (c), 12, ((guint64)(imm) & 0x7f), 13, (r3), 20, (p2), 27, (ta), 33, (x2), 34, sign_bit ((imm)), 36, (opcode), 37); } while (0)
+
+#define ia64_cmp_lt_imm_pred(code, qp, p1, p2, imm8, r3) ia64_a8 ((code), (qp), (p1), (p2), (imm8), (r3), 0xc, 2, 0, 0)
+#define ia64_cmp_ltu_imm_pred(code, qp, p1, p2, imm8, r3) ia64_a8 ((code), (qp), (p1), (p2), (imm8), (r3), 0xd, 2, 0, 0)
+#define ia64_cmp_eq_imm_pred(code, qp, p1, p2, imm8, r3) ia64_a8 ((code), (qp), (p1), (p2), (imm8), (r3), 0xe, 2, 0, 0)
+#define ia64_cmp_lt_unc_imm_pred(code, qp, p1, p2, imm8, r3) ia64_a8 ((code), (qp), (p1), (p2), (imm8), (r3), 0xc, 2, 0, 1)
+#define ia64_cmp_ltu_unc_imm_pred(code, qp, p1, p2, imm8, r3) ia64_a8 ((code), (qp), (p1), (p2), (imm8), (r3), 0xd, 2, 0, 1)
+#define ia64_cmp_eq_unc_imm_pred(code, qp, p1, p2, imm8, r3) ia64_a8 ((code), (qp), (p1), (p2), (imm8), (r3), 0xe, 2, 0, 1)
+#define ia64_cmp_eq_and_imm_pred(code, qp, p1, p2, imm8, r3) ia64_a8 ((code), (qp), (p1), (p2), (imm8), (r3), 0xc, 2, 1, 0)
+#define ia64_cmp_eq_or_imm_pred(code, qp, p1, p2, imm8, r3) ia64_a8 ((code), (qp), (p1), (p2), (imm8), (r3), 0xd, 2, 1, 0)
+#define ia64_cmp_eq_or_andcm_imm_pred(code, qp, p1, p2, imm8, r3) ia64_a8 ((code), (qp), (p1), (p2), (imm8), (r3), 0xe, 2, 1, 0)
+#define ia64_cmp_ne_and_imm_pred(code, qp, p1, p2, imm8, r3) ia64_a8 ((code), (qp), (p1), (p2), (imm8), (r3), 0xc, 2, 1, 1)
+#define ia64_cmp_ne_or_imm_pred(code, qp, p1, p2, imm8, r3) ia64_a8 ((code), (qp), (p1), (p2), (imm8), (r3), 0xd, 2, 1, 1)
+#define ia64_cmp_ne_or_andcm_imm_pred(code, qp, p1, p2, imm8, r3) ia64_a8 ((code), (qp), (p1), (p2), (imm8), (r3), 0xe, 2, 1, 1)
+
+#define ia64_cmp4_lt_imm_pred(code, qp, p1, p2, imm8, r3) ia64_a8 ((code), (qp), (p1), (p2), (imm8), (r3), 0xc, 3, 0, 0)
+#define ia64_cmp4_ltu_imm_pred(code, qp, p1, p2, imm8, r3) ia64_a8 ((code), (qp), (p1), (p2), (imm8), (r3), 0xd, 3, 0, 0)
+#define ia64_cmp4_eq_imm_pred(code, qp, p1, p2, imm8, r3) ia64_a8 ((code), (qp), (p1), (p2), (imm8), (r3), 0xe, 3, 0, 0)
+#define ia64_cmp4_lt_unc_imm_pred(code, qp, p1, p2, imm8, r3) ia64_a8 ((code), (qp), (p1), (p2), (imm8), (r3), 0xc, 3, 0, 1)
+#define ia64_cmp4_ltu_unc_imm_pred(code, qp, p1, p2, imm8, r3) ia64_a8 ((code), (qp), (p1), (p2), (imm8), (r3), 0xd, 3, 0, 1)
+#define ia64_cmp4_eq_unc_imm_pred(code, qp, p1, p2, imm8, r3) ia64_a8 ((code), (qp), (p1), (p2), (imm8), (r3), 0xe, 3, 0, 1)
+#define ia64_cmp4_eq_and_imm_pred(code, qp, p1, p2, imm8, r3) ia64_a8 ((code), (qp), (p1), (p2), (imm8), (r3), 0xc, 3, 1, 0)
+#define ia64_cmp4_eq_or_imm_pred(code, qp, p1, p2, imm8, r3) ia64_a8 ((code), (qp), (p1), (p2), (imm8), (r3), 0xd, 3, 1, 0)
+#define ia64_cmp4_eq_or_andcm_imm_pred(code, qp, p1, p2, imm8, r3) ia64_a8 ((code), (qp), (p1), (p2), (imm8), (r3), 0xe, 3, 1, 0)
+#define ia64_cmp4_ne_and_imm_pred(code, qp, p1, p2, imm8, r3) ia64_a8 ((code), (qp), (p1), (p2), (imm8), (r3), 0xc, 3, 1, 1)
+#define ia64_cmp4_ne_or_imm_pred(code, qp, p1, p2, imm8, r3) ia64_a8 ((code), (qp), (p1), (p2), (imm8), (r3), 0xd, 3, 1, 1)
+#define ia64_cmp4_ne_or_andcm_imm_pred(code, qp, p1, p2, imm8, r3) ia64_a8 ((code), (qp), (p1), (p2), (imm8), (r3), 0xe, 3, 1, 1)
+
+/* Pseudo ops */
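+/* The immediate forms cannot swap operands, so they use the identity
+   imm > r3 <=> !(imm - 1 < r3) for integer operands: gt/le pass imm - 1,
+   and gt/ge swap the target predicates. */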
+#define ia64_cmp_ne_imm_pred(code, qp, p1, p2, imm8, r3) ia64_cmp_eq_imm_pred ((code), (qp), (p2), (p1), (imm8), (r3))
+#define ia64_cmp_le_imm_pred(code, qp, p1, p2, imm8, r3) ia64_cmp_lt_imm_pred ((code), (qp), (p1), (p2), (imm8) - 1, (r3))
+#define ia64_cmp_gt_imm_pred(code, qp, p1, p2, imm8, r3) ia64_cmp_lt_imm_pred ((code), (qp), (p2), (p1), (imm8) - 1, (r3))
+#define ia64_cmp_ge_imm_pred(code, qp, p1, p2, imm8, r3) ia64_cmp_lt_imm_pred ((code), (qp), (p2), (p1), (imm8), (r3))
+#define ia64_cmp_leu_imm_pred(code, qp, p1, p2, imm8, r3) ia64_cmp_ltu_imm_pred ((code), (qp), (p1), (p2), (imm8) - 1, (r3))
+#define ia64_cmp_gtu_imm_pred(code, qp, p1, p2, imm8, r3) ia64_cmp_ltu_imm_pred ((code), (qp), (p2), (p1), (imm8) - 1, (r3))
+#define ia64_cmp_geu_imm_pred(code, qp, p1, p2, imm8, r3) ia64_cmp_ltu_imm_pred ((code), (qp), (p2), (p1), (imm8), (r3))
+
+#define ia64_cmp4_ne_imm_pred(code, qp, p1, p2, imm8, r3) ia64_cmp4_eq_imm_pred ((code), (qp), (p2), (p1), (imm8), (r3))
+#define ia64_cmp4_le_imm_pred(code, qp, p1, p2, imm8, r3) ia64_cmp4_lt_imm_pred ((code), (qp), (p1), (p2), (imm8) - 1, (r3))
+#define ia64_cmp4_gt_imm_pred(code, qp, p1, p2, imm8, r3) ia64_cmp4_lt_imm_pred ((code), (qp), (p2), (p1), (imm8) - 1, (r3))
+#define ia64_cmp4_ge_imm_pred(code, qp, p1, p2, imm8, r3) ia64_cmp4_lt_imm_pred ((code), (qp), (p2), (p1), (imm8), (r3))
+#define ia64_cmp4_leu_imm_pred(code, qp, p1, p2, imm8, r3) ia64_cmp4_ltu_imm_pred ((code), (qp), (p1), (p2), (imm8) - 1, (r3))
+#define ia64_cmp4_gtu_imm_pred(code, qp, p1, p2, imm8, r3) ia64_cmp4_ltu_imm_pred ((code), (qp), (p2), (p1), (imm8) - 1, (r3))
+#define ia64_cmp4_geu_imm_pred(code, qp, p1, p2, imm8, r3) ia64_cmp4_ltu_imm_pred ((code), (qp), (p2), (p1), (imm8), (r3))
+
+#define ia64_a9(code, qp, r1, r2, r3, x2a, za, zb, x4, x2b) do { read_pr ((code), (qp)); write_gr ((code), (r1)); read_gr ((code), (r2)); read_gr ((code), (r3)); ia64_emit_ins_10 ((code), IA64_INS_TYPE_A, (qp), 0, (r1), 6, (r2), 13, (r3), 20, (x2b), 27, (x4), 29, (zb), 33, (x2a), 34, (za), 36, (8), 37); } while (0)
+
+#define ia64_padd1_pred(code, qp,r1,r2,r3) ia64_a9 ((code), (qp), (r1), (r2), (r3), 1, 0, 0, 0, 0)
+#define ia64_padd2_pred(code, qp,r1,r2,r3) ia64_a9 ((code), (qp), (r1), (r2), (r3), 1, 0, 1, 0, 0)
+#define ia64_padd4_pred(code, qp,r1,r2,r3) ia64_a9 ((code), (qp), (r1), (r2), (r3), 1, 1, 0, 0, 0)
+#define ia64_padd1_sss_pred(code, qp,r1,r2,r3) ia64_a9 ((code), (qp), (r1), (r2), (r3), 1, 0, 0, 0, 1)
+#define ia64_padd2_sss_pred(code, qp,r1,r2,r3) ia64_a9 ((code), (qp), (r1), (r2), (r3), 1, 0, 1, 0, 1)
+#define ia64_padd1_uuu_pred(code, qp,r1,r2,r3) ia64_a9 ((code), (qp), (r1), (r2), (r3), 1, 0, 0, 0, 2)
+#define ia64_padd2_uuu_pred(code, qp,r1,r2,r3) ia64_a9 ((code), (qp), (r1), (r2), (r3), 1, 0, 1, 0, 2)
+#define ia64_padd1_uus_pred(code, qp,r1,r2,r3) ia64_a9 ((code), (qp), (r1), (r2), (r3), 1, 0, 0, 0, 3)
+#define ia64_padd2_uus_pred(code, qp,r1,r2,r3) ia64_a9 ((code), (qp), (r1), (r2), (r3), 1, 0, 1, 0, 3)
+
+#define ia64_psub1_pred(code, qp,r1,r2,r3) ia64_a9 ((code), (qp), (r1), (r2), (r3), 1, 0, 0, 1, 0)
+#define ia64_psub2_pred(code, qp,r1,r2,r3) ia64_a9 ((code), (qp), (r1), (r2), (r3), 1, 0, 1, 1, 0)
+#define ia64_psub4_pred(code, qp,r1,r2,r3) ia64_a9 ((code), (qp), (r1), (r2), (r3), 1, 1, 0, 1, 0)
+#define ia64_psub1_sss_pred(code, qp,r1,r2,r3) ia64_a9 ((code), (qp), (r1), (r2), (r3), 1, 0, 0, 1, 1)
+#define ia64_psub2_sss_pred(code, qp,r1,r2,r3) ia64_a9 ((code), (qp), (r1), (r2), (r3), 1, 0, 1, 1, 1)
+#define ia64_psub1_uuu_pred(code, qp,r1,r2,r3) ia64_a9 ((code), (qp), (r1), (r2), (r3), 1, 0, 0, 1, 2)
+#define ia64_psub2_uuu_pred(code, qp,r1,r2,r3) ia64_a9 ((code), (qp), (r1), (r2), (r3), 1, 0, 1, 1, 2)
+#define ia64_psub1_uus_pred(code, qp,r1,r2,r3) ia64_a9 ((code), (qp), (r1), (r2), (r3), 1, 0, 0, 1, 3)
+#define ia64_psub2_uus_pred(code, qp,r1,r2,r3) ia64_a9 ((code), (qp), (r1), (r2), (r3), 1, 0, 1, 1, 3)
+
+#define ia64_pavg1_pred(code, qp,r1,r2,r3) ia64_a9 ((code), (qp), (r1), (r2), (r3), 1, 0, 0, 2, 2)
+#define ia64_pavg2_pred(code, qp,r1,r2,r3) ia64_a9 ((code), (qp), (r1), (r2), (r3), 1, 0, 1, 2, 2)
+#define ia64_pavg1_raz_pred(code, qp,r1,r2,r3) ia64_a9 ((code), (qp), (r1), (r2), (r3), 1, 0, 0, 2, 3)
+#define ia64_pavg2_raz_pred(code, qp,r1,r2,r3) ia64_a9 ((code), (qp), (r1), (r2), (r3), 1, 0, 1, 2, 3)
+#define ia64_pavgsub1_pred(code, qp,r1,r2,r3) ia64_a9 ((code), (qp), (r1), (r2), (r3), 1, 0, 0, 3, 2)
+#define ia64_pavgsub2_pred(code, qp,r1,r2,r3) ia64_a9 ((code), (qp), (r1), (r2), (r3), 1, 0, 1, 3, 2)
+#define ia64_pcmp1_eq_pred(code, qp,r1,r2,r3) ia64_a9 ((code), (qp), (r1), (r2), (r3), 1, 0, 0, 9, 0)
+#define ia64_pcmp2_eq_pred(code, qp,r1,r2,r3) ia64_a9 ((code), (qp), (r1), (r2), (r3), 1, 0, 1, 9, 0)
+#define ia64_pcmp4_eq_pred(code, qp,r1,r2,r3) ia64_a9 ((code), (qp), (r1), (r2), (r3), 1, 1, 0, 9, 0)
+#define ia64_pcmp1_gt_pred(code, qp,r1,r2,r3) ia64_a9 ((code), (qp), (r1), (r2), (r3), 1, 0, 0, 9, 1)
+#define ia64_pcmp2_gt_pred(code, qp,r1,r2,r3) ia64_a9 ((code), (qp), (r1), (r2), (r3), 1, 0, 1, 9, 1)
+#define ia64_pcmp4_gt_pred(code, qp,r1,r2,r3) ia64_a9 ((code), (qp), (r1), (r2), (r3), 1, 1, 0, 9, 1)
+
+#define ia64_a10(code, qp, r1, r2, r3, x2a, za, zb, x4, ct2d) do { read_pr ((code), (qp)); write_gr ((code), (r1)); read_gr ((code), (r2)); read_gr ((code), (r3)); check_count2 ((ct2d)); ia64_emit_ins_10 ((code), IA64_INS_TYPE_A, (qp), 0, (r1), 6, (r2), 13, (r3), 20, (ct2d) - 1, 27, (x4), 29, (zb), 33, (x2a), 34, (za), 36, (8), 37); } while (0)
+
+#define ia64_pshladd2_pred(code, qp, r1, r2, r3, count) ia64_a10 ((code), (qp), (r1), (r2), (r3), 1, 0, 1, 4, (count))
+#define ia64_pshradd2_pred(code, qp, r1, r2, r3, count) ia64_a10 ((code), (qp), (r1), (r2), (r3), 1, 0, 1, 6, (count))
+
+#define encode_pmpyshr_count(count) (((count) == 0) ? 0 : (((count) == 7) ? 1 : (((count) == 15) ? 2 : 3)))
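+
+/* pmpyshr accepts only shift counts 0, 7, 15 and 16 (see the check_assert
+   in ia64_i1 below); this helper packs them into a 2-bit field, mapping
+   0, 7, 15 to 0, 1, 2 and anything else (i.e. 16) to 3. */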
+
+/*
+ * I-Unit Instructions
+ */
+
+#define ia64_i1(code, qp, r1, r2, r3, za, zb, ve, x2a, x2b, ct2d) do { read_pr ((code), (qp)); write_gr ((code), (r1)); read_gr ((code), (r2)); read_gr ((code), (r3)); check_assert (((ct2d) == 0) || ((ct2d) == 7) || ((ct2d) == 15) || ((ct2d) == 16)); ia64_emit_ins_11 ((code), IA64_INS_TYPE_I, (qp), 0, (r1), 6, (r2), 13, (r3), 20, (x2b), 28, encode_pmpyshr_count ((ct2d)), 30, (ve), 32, (zb), 33, (x2a), 34, (za), 36, (7), 37); } while (0)
+
+#define ia64_pmpyshr2_pred(code, qp, r1, r2, r3, count) ia64_i1 ((code), (qp), (r1), (r2), (r3), 0, 1, 0, 0, 3, (count))
+
+#define ia64_pmpyshr2_u_pred(code, qp, r1, r2, r3, count) ia64_i1 ((code), (qp), (r1), (r2), (r3), 0, 1, 0, 0, 1, (count))
+
+#define ia64_i2(code, qp, r1, r2, r3, za, zb, ve, x2a, x2b, x2c) do { read_pr ((code), (qp)); write_gr ((code), (r1)); read_gr ((code), (r2)); read_gr ((code), (r3)); ia64_emit_ins_11 ((code), IA64_INS_TYPE_I, (qp), 0, (r1), 6, (r2), 13, (r3), 20, (x2b), 28, (x2c), 30, (ve), 32, (zb), 33, (x2a), 34, (za), 36, (7), 37); } while (0)
+
+#define ia64_pmpy2_r_pred(code, qp, r1, r2, r3) ia64_i2 ((code), (qp), (r1), (r2), (r3), 0, 1, 0, 2, 1, 3)
+#define ia64_pmpy2_l_pred(code, qp, r1, r2, r3) ia64_i2 ((code), (qp), (r1), (r2), (r3), 0, 1, 0, 2, 3, 3)
+#define ia64_mix1_r_pred(code, qp, r1, r2, r3) ia64_i2 ((code), (qp), (r1), (r2), (r3), 0, 0, 0, 2, 0, 2)
+#define ia64_mix2_r_pred(code, qp, r1, r2, r3) ia64_i2 ((code), (qp), (r1), (r2), (r3), 0, 1, 0, 2, 0, 2)
+#define ia64_mix4_r_pred(code, qp, r1, r2, r3) ia64_i2 ((code), (qp), (r1), (r2), (r3), 1, 0, 0, 2, 0, 2)
+#define ia64_mix1_l_pred(code, qp, r1, r2, r3) ia64_i2 ((code), (qp), (r1), (r2), (r3), 0, 0, 0, 2, 2, 2)
+#define ia64_mix2_l_pred(code, qp, r1, r2, r3) ia64_i2 ((code), (qp), (r1), (r2), (r3), 0, 1, 0, 2, 2, 2)
+#define ia64_mix4_l_pred(code, qp, r1, r2, r3) ia64_i2 ((code), (qp), (r1), (r2), (r3), 1, 0, 0, 2, 2, 2)
+#define ia64_pack2_uss_pred(code, qp, r1, r2, r3) ia64_i2 ((code), (qp), (r1), (r2), (r3), 0, 1, 0, 2, 0, 0)
+#define ia64_pack2_sss_pred(code, qp, r1, r2, r3) ia64_i2 ((code), (qp), (r1), (r2), (r3), 0, 1, 0, 2, 2, 0)
+#define ia64_pack4_sss_pred(code, qp, r1, r2, r3) ia64_i2 ((code), (qp), (r1), (r2), (r3), 1, 0, 0, 2, 2, 0)
+#define ia64_unpack1_h_pred(code, qp, r1, r2, r3) ia64_i2 ((code), (qp), (r1), (r2), (r3), 0, 0, 0, 2, 0, 1)
+#define ia64_unpack2_h_pred(code, qp, r1, r2, r3) ia64_i2 ((code), (qp), (r1), (r2), (r3), 0, 1, 0, 2, 0, 1)
+#define ia64_unpack4_h_pred(code, qp, r1, r2, r3) ia64_i2 ((code), (qp), (r1), (r2), (r3), 1, 0, 0, 2, 0, 1)
+#define ia64_unpack1_l_pred(code, qp, r1, r2, r3) ia64_i2 ((code), (qp), (r1), (r2), (r3), 0, 0, 0, 2, 2, 1)
+#define ia64_unpack2_l_pred(code, qp, r1, r2, r3) ia64_i2 ((code), (qp), (r1), (r2), (r3), 0, 1, 0, 2, 2, 1)
+#define ia64_unpack4_l_pred(code, qp, r1, r2, r3) ia64_i2 ((code), (qp), (r1), (r2), (r3), 1, 0, 0, 2, 2, 1)
+#define ia64_pmin1_u_pred(code, qp, r1, r2, r3) ia64_i2 ((code), (qp), (r1), (r2), (r3), 0, 0, 0, 2, 1, 0)
+#define ia64_pmax1_u_pred(code, qp, r1, r2, r3) ia64_i2 ((code), (qp), (r1), (r2), (r3), 0, 0, 0, 2, 1, 1)
+#define ia64_pmin2_pred(code, qp, r1, r2, r3) ia64_i2 ((code), (qp), (r1), (r2), (r3), 0, 1, 0, 2, 3, 0)
+#define ia64_pmax2_pred(code, qp, r1, r2, r3) ia64_i2 ((code), (qp), (r1), (r2), (r3), 0, 1, 0, 2, 3, 1)
+#define ia64_psad1_pred(code, qp, r1, r2, r3) ia64_i2 ((code), (qp), (r1), (r2), (r3), 0, 0, 0, 2, 3, 2)
+
+typedef enum {
+ IA64_MUX1_BRCST = 0x0,
+ IA64_MUX1_MIX = 0x8,
+ IA64_MUX1_SHUF = 0x9,
+ IA64_MUX1_ALT = 0xa,
+ IA64_MUX1_REV = 0xb
+} Ia64Mux1Permutation;
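+
+/* Usage sketch: ia64_mux1_pred (code, 0, r1, r2, IA64_MUX1_REV) encodes
+   mux1 r1 = r2, @rev, reversing the byte order of r2; the other values
+   select the @brcst, @mix, @shuf and @alt permutations. */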
+
+#define ia64_i3(code, qp, r1, r2, mbtype, opcode, za, zb, ve, x2a, x2b, x2c) do { read_pr ((code), (qp)); write_gr ((code), (r1)); read_gr ((code), (r2)); ia64_emit_ins_11 ((code), IA64_INS_TYPE_I, (qp), 0, (r1), 6, (r2), 13, (mbtype), 20, (x2b), 28, (x2c), 30, (ve), 32, (zb), 33, (x2a), 34, (za), 36, (opcode), 37); } while (0)
+
+#define ia64_mux1_pred(code, qp, r1, r2, mbtype) ia64_i3 ((code), (qp), (r1), (r2), (mbtype), 7, 0, 0, 0, 3, 2, 2)
+
+#define ia64_i4(code, qp, r1, r2, mhtype, opcode, za, zb, ve, x2a, x2b, x2c) do { read_pr ((code), (qp)); write_gr ((code), (r1)); read_gr ((code), (r2)); ia64_emit_ins_11 ((code), IA64_INS_TYPE_I, (qp), 0, (r1), 6, (r2), 13, (mhtype), 20, (x2b), 28, (x2c), 30, (ve), 32, (zb), 33, (x2a), 34, (za), 36, (opcode), 37); } while (0)
+
+#define ia64_mux2_pred(code, qp, r1, r2, mhtype) ia64_i4 ((code), (qp), (r1), (r2), (mhtype), 7, 0, 1, 0, 3, 2, 2)
+
+#define ia64_i5(code, qp, r1, r2, r3, za, zb, ve, x2a, x2b, x2c) do { read_pr ((code), (qp)); write_gr ((code), (r1)); read_gr ((code), (r2)); read_gr ((code), (r3)); ia64_emit_ins_11 ((code), IA64_INS_TYPE_I, (qp), 0, (r1), 6, (r2), 13, (r3), 20, (x2b), 28, (x2c), 30, (ve), 32, (zb), 33, (x2a), 34, (za), 36, (7), 37); } while (0)
+
+#define ia64_pshr2_pred(code, qp, r1, r3, r2) ia64_i5 ((code), (qp), (r1), (r2), (r3), 0, 1, 0, 0, 2, 0)
+#define ia64_pshr4_pred(code, qp, r1, r3, r2) ia64_i5 ((code), (qp), (r1), (r2), (r3), 1, 0, 0, 0, 2, 0)
+#define ia64_shr_pred(code, qp, r1, r3, r2) ia64_i5 ((code), (qp), (r1), (r2), (r3), 1, 1, 0, 0, 2, 0)
+#define ia64_pshr2_u_pred(code, qp, r1, r3, r2) ia64_i5 ((code), (qp), (r1), (r2), (r3), 0, 1, 0, 0, 0, 0)
+#define ia64_pshr4_u_pred(code, qp, r1, r3, r2) ia64_i5 ((code), (qp), (r1), (r2), (r3), 1, 0, 0, 0, 0, 0)
+#define ia64_shr_u_pred(code, qp, r1, r3, r2) ia64_i5 ((code), (qp), (r1), (r2), (r3), 1, 1, 0, 0, 0, 0)
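+
+/* Note the parameter order: the shift macros take (r1, r3, r2) to mirror
+   the assembly form shr r1 = r3, r2 (r3 shifted by the count in r2),
+   while the I5 encoding places r2 at bit 13 and r3 at bit 20. */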
+
+#define ia64_i6(code, qp, r1, count, r3, za, zb, ve, x2a, x2b, x2c) do { read_pr ((code), (qp)); write_gr ((code), (r1)); read_gr ((code), (r3)); check_count5 ((count)); ia64_emit_ins_11 ((code), IA64_INS_TYPE_I, (qp), 0, (r1), 6, (count), 14, (r3), 20, (x2b), 28, (x2c), 30, (ve), 32, (zb), 33, (x2a), 34, (za), 36, (7), 37); } while (0)
+
+#define ia64_pshr2_imm_pred(code, qp, r1, r3, count) ia64_i6 ((code), (qp), (r1), (count), (r3), 0, 1, 0, 1, 3, 0)
+#define ia64_pshr4_imm_pred(code, qp, r1, r3, count) ia64_i6 ((code), (qp), (r1), (count), (r3), 1, 0, 0, 1, 3, 0)
+#define ia64_pshr2_u_imm_pred(code, qp, r1, r3, count) ia64_i6 ((code), (qp), (r1), (count), (r3), 0, 1, 0, 1, 1, 0)
+#define ia64_pshr4_u_imm_pred(code, qp, r1, r3, count) ia64_i6 ((code), (qp), (r1), (count), (r3), 1, 0, 0, 1, 1, 0)
+
+#define ia64_i7(code, qp, r1, r2, r3, za, zb, ve, x2a, x2b, x2c) do { read_pr ((code), (qp)); write_gr ((code), (r1)); read_gr ((code), (r2)); read_gr ((code), (r3)); ia64_emit_ins_11 ((code), IA64_INS_TYPE_I, (qp), 0, (r1), 6, (r2), 13, (r3), 20, (x2b), 28, (x2c), 30, (ve), 32, (zb), 33, (x2a), 34, (za), 36, (7), 37); } while (0)
+
+#define ia64_pshl2_pred(code, qp, r1, r2, r3) ia64_i7 ((code), (qp), (r1), (r2), (r3), 0, 1, 0, 0, 0, 1)
+#define ia64_pshl4_pred(code, qp, r1, r2, r3) ia64_i7 ((code), (qp), (r1), (r2), (r3), 1, 0, 0, 0, 0, 1)
+#define ia64_shl_pred(code, qp, r1, r2, r3) ia64_i7 ((code), (qp), (r1), (r2), (r3), 1, 1, 0, 0, 0, 1)
+
+#define ia64_i8(code, qp, r1, r2, count, za, zb, ve, x2a, x2b, x2c) do { read_pr ((code), (qp)); write_gr ((code), (r1)); read_gr ((code), (r2)); check_count5 ((count)); ia64_emit_ins_11 ((code), IA64_INS_TYPE_I, (qp), 0, (r1), 6, (r2), 13, 31 - (count), 20, (x2b), 28, (x2c), 30, (ve), 32, (zb), 33, (x2a), 34, (za), 36, (7), 37); } while (0)
+
+#define ia64_pshl2_imm_pred(code, qp, r1, r2, count) ia64_i8 ((code), (qp), (r1), (r2), (count), 0, 1, 0, 3, 1, 1)
+#define ia64_pshl4_imm_pred(code, qp, r1, r2, count) ia64_i8 ((code), (qp), (r1), (r2), (count), 1, 0, 0, 3, 1, 1)
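+
+/* The immediate shift-left count is stored complemented (31 - count in
+   ia64_i8 above), so check_count5 keeps count within the 5-bit 0-31
+   range. */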
+
+#define ia64_i9(code, qp, r1, r3, za, zb, ve, x2a, x2b, x2c) do { read_pr ((code), (qp)); write_gr ((code), (r1)); read_gr ((code), (r3)); ia64_emit_ins_11 ((code), IA64_INS_TYPE_I, (qp), 0, (r1), 6, 0, 13, (r3), 20, (x2b), 28, (x2c), 30, (ve), 32, (zb), 33, (x2a), 34, (za), 36, (7), 37); } while (0)
+
+#define ia64_popcnt_pred(code, qp, r1, r3) ia64_i9 ((code), (qp), (r1), (r3), 0, 1, 0, 1, 1, 2)
+
+#define ia64_i10(code, qp, r1, r2, r3, count, opcode, x2, x) do { read_pr ((code), (qp)); write_gr ((code), (r1)); read_gr ((code), (r2)); read_gr ((code), (r3)); check_count6 ((count)); ia64_emit_ins_8 ((code), IA64_INS_TYPE_I, (qp), 0, (r1), 6, (r2), 13, (r3), 20, (count), 27, (x), 33, (x2), 34, (opcode), 37); } while (0)
+
+#define ia64_shrp_pred(code, qp, r1, r2, r3, count) ia64_i10 ((code), (qp), (r1), (r2), (r3), (count), 5, 3, 0)
+
+#define ia64_i11(code, qp, r1, r3, pos, len, x2, x, y) do { read_pr ((code), (qp)); write_gr ((code), (r1)); read_gr ((code), (r3)); ia64_emit_ins_8 ((code), IA64_INS_TYPE_I, (qp), 0, (r1), 6, ((pos) << 1) | (y), 13, (r3), 20, (len) - 1, 27, (x), 33, (x2), 34, (5), 37); } while (0)
+
+#define ia64_extr_u_pred(code, qp, r1, r3, pos, len) ia64_i11 ((code), (qp), (r1), (r3), (pos), (len), 1, 0, 0)
+#define ia64_extr_pred(code, qp, r1, r3, pos, len) ia64_i11 ((code), (qp), (r1), (r3), (pos), (len), 1, 0, 1)
+
+#define ia64_i12(code, qp, r1, r2, pos, len, x2, x, y) do { read_pr ((code), (qp)); write_gr ((code), (r1)); read_gr ((code), (r2)); ia64_emit_ins_8 ((code), IA64_INS_TYPE_I, (qp), 0, (r1), 6, (r2), 13, (63 - (pos)) | ((y) << 6), 20, (len) - 1, 27, (x), 33, (x2), 34, (5), 37); } while (0)
+
+#define ia64_dep_z_pred(code, qp, r1, r2, pos, len) ia64_i12 ((code), (qp), (r1), (r2), (pos), (len), 1, 1, 0)
+
+#define ia64_i13(code, qp, r1, imm, pos, len, x2, x, y) do { read_pr ((code), (qp)); write_gr ((code), (r1)); ia64_emit_ins_9 ((code), IA64_INS_TYPE_I, (qp), 0, (r1), 6, ((guint64)(imm) & 0x7f), 13, (63 - (pos)) | ((y) << 6), 20, (len) - 1, 27, (x), 33, (x2), 34, sign_bit ((imm)), 36, (5), 37); } while (0)
+
+#define ia64_dep_z_imm_pred(code, qp, r1, imm, pos, len) ia64_i13 ((code), (qp), (r1), (imm), (pos), (len), 1, 1, 1)
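+
+/* Usage sketch: ia64_dep_z_pred (code, 0, r1, r2, 8, 16) encodes
+   dep.z r1 = r2, 8, 16: the low 16 bits of r2 are deposited at bit 8 of
+   r1 and the remaining bits are zeroed. The encoding stores 63 - pos and
+   len - 1. */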
+
+#define ia64_i14(code, qp, r1, imm, r3, pos, len, x2, x) do { read_pr ((code), (qp)); write_gr ((code), (r1)); read_gr ((code), (r3)); check_imm1 (imm); ia64_emit_ins_9 ((code), IA64_INS_TYPE_I, (qp), 0, (r1), 6, (63 - (pos)) << 1, 13, (r3), 20, (len) - 1, 27, (x), 33, (x2), 34, sign_bit ((imm)), 36, (5), 37); } while (0)
+
+#define ia64_dep_imm_pred(code, qp, r1, imm, r3, pos, len) ia64_i14 ((code), (qp), (r1), (imm), (r3), (pos), (len), 3, 1)
+
+#define ia64_i15(code, qp, r1, r2, r3, pos, len) do { read_pr ((code), (qp)); write_gr ((code), (r1)); read_gr ((code), (r2)); read_gr ((code), (r3)); check_len4 ((len)); ia64_emit_ins_7 ((code), IA64_INS_TYPE_I, (qp), 0, (r1), 6, (r2), 13, (r3), 20, (len) - 1, 27, (63 - (pos)), 31, (4), 37); } while (0)
+
+#define ia64_dep_pred(code, qp, r1, r2, r3, pos, len) ia64_i15 ((code), (qp), (r1), (r2), (r3), (pos), (len))
+
+#define ia64_i16(code, qp, p1, p2, r3, pos, x2, ta, tb, y, c) do { read_pr ((code), (qp)); write_pr ((code), (p1)); write_pr ((code), (p2)); ia64_emit_ins_11 ((code), IA64_INS_TYPE_I, (qp), 0, (p1), 6, (c), 12, (y), 13, (pos), 14, (r3), 20, (p2), 27, (ta), 33, (x2), 34, (tb), 36, (5), 37); } while (0)
+
+#define ia64_tbit_z_pred(code, qp, p1, p2, r3, pos) ia64_i16 ((code), (qp), (p1), (p2), (r3), (pos), 0, 0, 0, 0, 0)
+#define ia64_tbit_z_unc_pred(code, qp, p1, p2, r3, pos) ia64_i16 ((code), (qp), (p1), (p2), (r3), (pos), 0, 0, 0, 0, 1)
+#define ia64_tbit_z_and_pred(code, qp, p1, p2, r3, pos) ia64_i16 ((code), (qp), (p1), (p2), (r3), (pos), 0, 0, 1, 0, 0)
+#define ia64_tbit_nz_and_pred(code, qp, p1, p2, r3, pos) ia64_i16 ((code), (qp), (p1), (p2), (r3), (pos), 0, 0, 1, 0, 1)
+#define ia64_tbit_z_or_pred(code, qp, p1, p2, r3, pos) ia64_i16 ((code), (qp), (p1), (p2), (r3), (pos), 0, 1, 0, 0, 0)
+#define ia64_tbit_nz_or_pred(code, qp, p1, p2, r3, pos) ia64_i16 ((code), (qp), (p1), (p2), (r3), (pos), 0, 1, 0, 0, 1)
+#define ia64_tbit_z_or_andcm_pred(code, qp, p1, p2, r3, pos) ia64_i16 ((code), (qp), (p1), (p2), (r3), (pos), 0, 1, 1, 0, 0)
+#define ia64_tbit_nz_or_andcm_pred(code, qp, p1, p2, r3, pos) ia64_i16 ((code), (qp), (p1), (p2), (r3), (pos), 0, 1, 1, 0, 1)
+
+#define ia64_i17(code, qp, p1, p2, r3, x2, ta, tb, y, c) do { read_pr ((code), (qp)); write_pr ((code), (p1)); write_pr ((code), (p2)); ia64_emit_ins_10 ((code), IA64_INS_TYPE_I, (qp), 0, (p1), 6, (c), 12, (y), 13, (r3), 20, (p2), 27, (ta), 33, (x2), 34, (tb), 36, (5), 37); } while (0)
+
+#define ia64_tnat_z_pred(code, qp, p1, p2, r3) ia64_i17 ((code), (qp), (p1), (p2), (r3), 0, 0, 0, 1, 0)
+#define ia64_tnat_z_unc_pred(code, qp, p1, p2, r3) ia64_i17 ((code), (qp), (p1), (p2), (r3), 0, 0, 0, 1, 1)
+#define ia64_tnat_z_and_pred(code, qp, p1, p2, r3) ia64_i17 ((code), (qp), (p1), (p2), (r3), 0, 0, 1, 1, 0)
+#define ia64_tnat_nz_and_pred(code, qp, p1, p2, r3) ia64_i17 ((code), (qp), (p1), (p2), (r3), 0, 0, 1, 1, 1)
+#define ia64_tnat_z_or_pred(code, qp, p1, p2, r3) ia64_i17 ((code), (qp), (p1), (p2), (r3), 0, 1, 0, 1, 0)
+#define ia64_tnat_nz_or_pred(code, qp, p1, p2, r3) ia64_i17 ((code), (qp), (p1), (p2), (r3), 0, 1, 0, 1, 1)
+#define ia64_tnat_z_or_andcm_pred(code, qp, p1, p2, r3) ia64_i17 ((code), (qp), (p1), (p2), (r3), 0, 1, 1, 1, 0)
+#define ia64_tnat_nz_or_andcm_pred(code, qp, p1, p2, r3) ia64_i17 ((code), (qp), (p1), (p2), (r3), 0, 1, 1, 1, 1)
+
+#define ia64_i18(code, qp, imm, x3, x6, y) do { read_pr ((code), (qp)); ia64_emit_ins_7 ((code), IA64_INS_TYPE_I, (qp), 0, (imm) & 0xfffff, 6, (y), 26, (x6), 27, (x3), 33, ((imm) >> 20) & 0x1, 36, (0), 37); } while (0)
+
+#define ia64_nop_i_pred(code, qp, imm) ia64_i18 ((code), (qp), (imm), 0, 1, 0)
+#define ia64_hint_i_pred(code, qp, imm) ia64_i18 ((code), (qp), (imm), 0, 1, 1)
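+
+/* nop.i and hint.i share the I18 encoding; only the y bit (the final
+   argument above) distinguishes them. */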
+
+#define ia64_i19(code, qp, imm, x3, x6) do { read_pr ((code), (qp)); check_imm21 ((imm)); ia64_emit_ins_6 ((code), IA64_INS_TYPE_I, (qp), 0, (imm) & 0xfffff, 6, (x6), 27, (x3), 33, ((imm) >> 20) & 0x1, 36, (0), 37); } while (0)
+
+#define ia64_break_i_pred(code, qp, imm) ia64_i19 ((code), (qp), (imm), 0, 0)
+
+#define ia64_i20(code, qp, r2, imm, x3) do { read_pr ((code), (qp)); check_imm21 ((imm)); read_gr ((code), (r2)); ia64_emit_ins_7 ((code), IA64_INS_TYPE_I, (qp), 0, (imm) & 0x7f, 6, (r2), 13, ((imm) >> 7) & 0x1fff, 20, (x3), 33, sign_bit ((imm)), 36, (0), 37); } while (0)
+
+#define ia64_chk_s_i_pred(code, qp,r2,disp) ia64_i20 ((code), (qp), (r2), (disp), 1)
+
+#define ia64_i21(code, qp, b1, r2, tag13, x3, x, ih, wh) do { read_pr ((code), (qp)); check_imm8 (tag13); write_br ((code), (b1)); read_gr ((code), (r2)); ia64_emit_ins_9 ((code), IA64_INS_TYPE_I, (qp), 0, (b1), 6, (r2), 13, (wh), 20, (x), 22, (ih), 23, (tag13) & 0x1ff, 24, (x3), 33, (0), 37); } while (0)
+
+typedef enum {
+ IA64_MOV_TO_BR_WH_SPTK = 0,
+ IA64_MOV_TO_BR_WH_NONE = 1,
+ IA64_MOV_TO_BR_WH_DPTK = 2
+} Ia64MovToBrWhetherHint;
+
+typedef enum {
+ IA64_BR_IH_NONE = 0,
+ IA64_BR_IH_IMP = 1
+} Ia64BranchImportanceHint;
+
+#define ia64_mov_to_br_hint_pred(code, qp, b1, r2, disp, wh, ih) ia64_i21 ((code), (qp), (b1), (r2), (disp), 7, 0, (ih), (wh))
+#define ia64_mov_ret_to_br_hint_pred(code, qp, b1, r2, disp, wh, ih) ia64_i21 ((code), (qp), (b1), (r2), (disp), 7, 1, (ih), (wh))
+
+/* Pseudo ops */
+
+#define ia64_mov_to_br_pred(code, qp, b1, r2) ia64_mov_to_br_hint_pred ((code), (qp), (b1), (r2), 0, 0, 0)
+#define ia64_mov_ret_to_br_pred(code, qp, b1, r2) ia64_mov_ret_to_br_hint_pred ((code), (qp), (b1), (r2), 0, 0, 0)
+
+/* End of pseudo ops */
+
+#define ia64_i22(code, qp, r1, b2, x3, x6) do { read_pr ((code), (qp)); write_gr ((code), (r1)); read_br ((code), (b2)); ia64_emit_ins_6 ((code), IA64_INS_TYPE_I, (qp), 0, (r1), 6, (b2), 13, (x6), 27, (x3), 33, (0), 37); } while (0)
+
+#define ia64_mov_from_br_pred(code, qp, r1, b2) ia64_i22 ((code), (qp), (r1), (b2), 0, 0x31)
+
+#define ia64_i23(code, qp, r2, mask, x3) do { read_pr ((code), (qp)); read_gr ((code), (r2)); ia64_emit_ins_7 ((code), IA64_INS_TYPE_I, (qp), 0, (mask) & 0x7f, 6, (r2), 13, ((mask) >> 7) & 0xff, 24, (x3), 33, sign_bit ((mask)), 36, (0), 37); } while (0)
+
+#define ia64_mov_to_pred_pred(code, qp, r2, mask) ia64_i23 ((code), (qp), (r2), (mask) >> 1, 3)
+
+#define ia64_i24(code, qp, imm, x3) do { read_pr ((code), (qp)); ia64_emit_ins_5 ((code), IA64_INS_TYPE_I, (qp), 0, (imm) & 0x7ffffff, 6, (x3), 33, sign_bit ((imm)), 36, (0), 37); } while (0)
+
+#define ia64_mov_to_pred_rot_imm_pred(code, qp,imm) ia64_i24 ((code), (qp), (imm) >> 16, 2)
+
+#define ia64_i25(code, qp, r1, x3, x6) do { read_pr ((code), (qp)); write_gr ((code), (r1)); ia64_emit_ins_5 ((code), IA64_INS_TYPE_I, (qp), 0, (r1), 6, (x6), 27, (x3), 33, (0), 37); } while (0)
+
+#define ia64_mov_from_ip_pred(code, qp, r1) ia64_i25 ((code), (qp), (r1), 0, 0x30)
+#define ia64_mov_from_pred_pred(code, qp, r1) ia64_i25 ((code), (qp), (r1), 0, 0x33)
+
+#define ia64_i26(code, qp, ar3, r2, x3, x6) do { read_pr ((code), (qp)); read_gr ((code), (r2)); write_ar ((code), (ar3)); ia64_emit_ins_6 ((code), IA64_INS_TYPE_I, (qp), 0, (r2), 13, (ar3), 20, (x6), 27, (x3), 33, (0), 37); } while (0)
+
+#define ia64_mov_to_ar_i_pred(code, qp, ar3, r2) ia64_i26 ((code), (qp), (ar3), (r2), 0, 0x2a)
+
+#define ia64_i27(code, qp, ar3, imm, x3, x6) do { read_pr ((code), (qp)); write_ar ((code), (ar3)); check_imm8 ((imm)); ia64_emit_ins_7 ((code), IA64_INS_TYPE_I, (qp), 0, (imm) & 0x7f, 13, (ar3), 20, (x6), 27, (x3), 33, sign_bit ((imm)), 36, (0), 37); } while (0)
+
+#define ia64_mov_to_ar_imm_i_pred(code, qp, ar3, imm) ia64_i27 ((code), (qp), (ar3), (imm), 0, 0x0a)
+
+#define ia64_i28(code, qp, r1, ar3, x3, x6) do { read_pr ((code), (qp)); read_ar ((code), (ar3)); write_gr ((code), (r1)); ia64_emit_ins_6 ((code), IA64_INS_TYPE_I, (qp), 0, (r1), 6, (ar3), 20, (x6), 27, (x3), 33, (0), 37); } while (0)
+
+#define ia64_mov_from_ar_i_pred(code, qp, r1, ar3) ia64_i28 ((code), (qp), (r1), (ar3), 0, 0x32)
+
+#define ia64_i29(code, qp, r1, r3, x3, x6) do { read_pr ((code), (qp)); write_gr ((code), (r1)); read_gr ((code), (r3)); ia64_emit_ins_6 ((code), IA64_INS_TYPE_I, (qp), 0, (r1), 6, (r3), 20, (x6), 27, (x3), 33, (0), 37); } while (0)
+
+#define ia64_zxt1_pred(code, qp, r1, r3) ia64_i29 ((code), (qp), (r1), (r3), 0, 0x10)
+#define ia64_zxt2_pred(code, qp, r1, r3) ia64_i29 ((code), (qp), (r1), (r3), 0, 0x11)
+#define ia64_zxt4_pred(code, qp, r1, r3) ia64_i29 ((code), (qp), (r1), (r3), 0, 0x12)
+#define ia64_sxt1_pred(code, qp, r1, r3) ia64_i29 ((code), (qp), (r1), (r3), 0, 0x14)
+#define ia64_sxt2_pred(code, qp, r1, r3) ia64_i29 ((code), (qp), (r1), (r3), 0, 0x15)
+#define ia64_sxt4_pred(code, qp, r1, r3) ia64_i29 ((code), (qp), (r1), (r3), 0, 0x16)
+#define ia64_czx1_l_pred(code, qp, r1, r3) ia64_i29 ((code), (qp), (r1), (r3), 0, 0x18)
+#define ia64_czx2_l_pred(code, qp, r1, r3) ia64_i29 ((code), (qp), (r1), (r3), 0, 0x19)
+#define ia64_czx1_r_pred(code, qp, r1, r3) ia64_i29 ((code), (qp), (r1), (r3), 0, 0x1C)
+#define ia64_czx2_r_pred(code, qp, r1, r3) ia64_i29 ((code), (qp), (r1), (r3), 0, 0x1D)
+
+/*
+ * M-Unit Instructions
+ */
+
+typedef enum {
+ IA64_LD_HINT_NONE = 0,
+ IA64_LD_HINT_NT1 = 1,
+ IA64_LD_HINT_NTA = 3
+} Ia64LoadHint;
+
+typedef enum {
+ IA64_ST_HINT_NONE = 0,
+ IA64_ST_HINT_NTA = 3
+} Ia64StoreHint;
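+
+/* Usage sketch: ia64_ld8_hint_pred (code, 0, r1, r3, IA64_LD_HINT_NTA)
+   encodes ld8.nta r1 = [r3]. The hints are advisory temporal-locality
+   completers and never change the value loaded or stored. */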
+
+#define ia64_m1(code, qp, r1, r3, hint, m, x, x6) do { read_pr ((code), (qp)); write_gr ((code), (r1)); read_gr ((code), (r3)); ia64_emit_ins_8 ((code), IA64_INS_TYPE_M, (qp), 0, (r1), 6, (r3), 20, (x), 27, (hint), 28, (x6), 30, (m), 36, (4), 37); } while (0)
+
+#define ia64_ld1_hint_pred(code, qp, r1, r3, hint) ia64_m1 ((code), (qp), (r1), (r3), (hint), 0, 0, 0x00)
+#define ia64_ld2_hint_pred(code, qp, r1, r3, hint) ia64_m1 ((code), (qp), (r1), (r3), (hint), 0, 0, 0x01)
+#define ia64_ld4_hint_pred(code, qp, r1, r3, hint) ia64_m1 ((code), (qp), (r1), (r3), (hint), 0, 0, 0x02)
+#define ia64_ld8_hint_pred(code, qp, r1, r3, hint) ia64_m1 ((code), (qp), (r1), (r3), (hint), 0, 0, 0x03)
+
+#define ia64_ld1_s_hint_pred(code, qp, r1, r3, hint) ia64_m1 ((code), (qp), (r1), (r3), (hint), 0, 0, 0x04)
+#define ia64_ld2_s_hint_pred(code, qp, r1, r3, hint) ia64_m1 ((code), (qp), (r1), (r3), (hint), 0, 0, 0x05)
+#define ia64_ld4_s_hint_pred(code, qp, r1, r3, hint) ia64_m1 ((code), (qp), (r1), (r3), (hint), 0, 0, 0x06)
+#define ia64_ld8_s_hint_pred(code, qp, r1, r3, hint) ia64_m1 ((code), (qp), (r1), (r3), (hint), 0, 0, 0x07)
+
+#define ia64_ld1_a_hint_pred(code, qp, r1, r3, hint) ia64_m1 ((code), (qp), (r1), (r3), (hint), 0, 0, 0x08)
+#define ia64_ld2_a_hint_pred(code, qp, r1, r3, hint) ia64_m1 ((code), (qp), (r1), (r3), (hint), 0, 0, 0x09)
+#define ia64_ld4_a_hint_pred(code, qp, r1, r3, hint) ia64_m1 ((code), (qp), (r1), (r3), (hint), 0, 0, 0x0A)
+#define ia64_ld8_a_hint_pred(code, qp, r1, r3, hint) ia64_m1 ((code), (qp), (r1), (r3), (hint), 0, 0, 0x0B)
+
+#define ia64_ld1_sa_hint_pred(code, qp, r1, r3, hint) ia64_m1 ((code), (qp), (r1), (r3), (hint), 0, 0, 0x0C)
+#define ia64_ld2_sa_hint_pred(code, qp, r1, r3, hint) ia64_m1 ((code), (qp), (r1), (r3), (hint), 0, 0, 0x0D)
+#define ia64_ld4_sa_hint_pred(code, qp, r1, r3, hint) ia64_m1 ((code), (qp), (r1), (r3), (hint), 0, 0, 0x0E)
+#define ia64_ld8_sa_hint_pred(code, qp, r1, r3, hint) ia64_m1 ((code), (qp), (r1), (r3), (hint), 0, 0, 0x0F)
+
+#define ia64_ld1_bias_hint_pred(code, qp, r1, r3, hint) ia64_m1 ((code), (qp), (r1), (r3), (hint), 0, 0, 0x10)
+#define ia64_ld2_bias_hint_pred(code, qp, r1, r3, hint) ia64_m1 ((code), (qp), (r1), (r3), (hint), 0, 0, 0x11)
+#define ia64_ld4_bias_hint_pred(code, qp, r1, r3, hint) ia64_m1 ((code), (qp), (r1), (r3), (hint), 0, 0, 0x12)
+#define ia64_ld8_bias_hint_pred(code, qp, r1, r3, hint) ia64_m1 ((code), (qp), (r1), (r3), (hint), 0, 0, 0x13)
+
+#define ia64_ld1_acq_hint_pred(code, qp, r1, r3, hint) ia64_m1 ((code), (qp), (r1), (r3), (hint), 0, 0, 0x14)
+#define ia64_ld2_acq_hint_pred(code, qp, r1, r3, hint) ia64_m1 ((code), (qp), (r1), (r3), (hint), 0, 0, 0x15)
+#define ia64_ld4_acq_hint_pred(code, qp, r1, r3, hint) ia64_m1 ((code), (qp), (r1), (r3), (hint), 0, 0, 0x16)
+#define ia64_ld8_acq_hint_pred(code, qp, r1, r3, hint) ia64_m1 ((code), (qp), (r1), (r3), (hint), 0, 0, 0x17)
+
+#define ia64_ld8_fill_hint_pred(code, qp, r1, r3, hint) ia64_m1 ((code), (qp), (r1), (r3), (hint), 0, 0, 0x1B)
+
+#define ia64_ld1_c_clr_hint_pred(code, qp, r1, r3, hint) ia64_m1 ((code), (qp), (r1), (r3), (hint), 0, 0, 0x20)
+#define ia64_ld2_c_clr_hint_pred(code, qp, r1, r3, hint) ia64_m1 ((code), (qp), (r1), (r3), (hint), 0, 0, 0x21)
+#define ia64_ld4_c_clr_hint_pred(code, qp, r1, r3, hint) ia64_m1 ((code), (qp), (r1), (r3), (hint), 0, 0, 0x22)
+#define ia64_ld8_c_clr_hint_pred(code, qp, r1, r3, hint) ia64_m1 ((code), (qp), (r1), (r3), (hint), 0, 0, 0x23)
+
+#define ia64_ld1_c_nc_hint_pred(code, qp, r1, r3, hint) ia64_m1 ((code), (qp), (r1), (r3), (hint), 0, 0, 0x24)
+#define ia64_ld2_c_nc_hint_pred(code, qp, r1, r3, hint) ia64_m1 ((code), (qp), (r1), (r3), (hint), 0, 0, 0x25)
+#define ia64_ld4_c_nc_hint_pred(code, qp, r1, r3, hint) ia64_m1 ((code), (qp), (r1), (r3), (hint), 0, 0, 0x26)
+#define ia64_ld8_c_nc_hint_pred(code, qp, r1, r3, hint) ia64_m1 ((code), (qp), (r1), (r3), (hint), 0, 0, 0x27)
+
+#define ia64_ld1_c_clr_acq_hint_pred(code, qp, r1, r3, hint) ia64_m1 ((code), (qp), (r1), (r3), (hint), 0, 0, 0x28)
+#define ia64_ld2_c_clr_acq_hint_pred(code, qp, r1, r3, hint) ia64_m1 ((code), (qp), (r1), (r3), (hint), 0, 0, 0x29)
+#define ia64_ld4_c_clr_acq_hint_pred(code, qp, r1, r3, hint) ia64_m1 ((code), (qp), (r1), (r3), (hint), 0, 0, 0x2A)
+#define ia64_ld8_c_clr_acq_hint_pred(code, qp, r1, r3, hint) ia64_m1 ((code), (qp), (r1), (r3), (hint), 0, 0, 0x2B)
+
+/* FIXME: This writes AR.CSD */
+#define ia64_ld16_hint_pred(code, qp, r1, r3, hint) ia64_m1 ((code), (qp), (r1), (r3), (hint), 0, 1, 0x28)
+#define ia64_ld16_acq_hint_pred(code, qp, r1, r3, hint) ia64_m1 ((code), (qp), (r1), (r3), (hint), 0, 1, 0x2C)
+
+#define ia64_m2(code, qp, r1, r2, r3, hint, m, x, x6) do { read_pr ((code), (qp)); write_gr ((code), (r1)); read_gr ((code), (r2)); read_gr ((code), (r3)); write_gr ((code), (r3)); ia64_emit_ins_9 ((code), IA64_INS_TYPE_M, (qp), 0, (r1), 6, (r2), 13, (r3), 20, (x), 27, (hint), 28, (x6), 30, (m), 36, (4), 37); } while (0)
+
+#define ia64_ld1_inc_hint_pred(code, qp, r1, r2, r3, hint) ia64_m2 ((code), (qp), (r1), (r2), (r3), (hint), 1, 0, 0x00)
+#define ia64_ld2_inc_hint_pred(code, qp, r1, r2, r3, hint) ia64_m2 ((code), (qp), (r1), (r2), (r3), (hint), 1, 0, 0x01)
+#define ia64_ld4_inc_hint_pred(code, qp, r1, r2, r3, hint) ia64_m2 ((code), (qp), (r1), (r2), (r3), (hint), 1, 0, 0x02)
+#define ia64_ld8_inc_hint_pred(code, qp, r1, r2, r3, hint) ia64_m2 ((code), (qp), (r1), (r2), (r3), (hint), 1, 0, 0x03)
+
+#define ia64_ld1_s_inc_hint_pred(code, qp, r1, r2, r3, hint) ia64_m2 ((code), (qp), (r1), (r2), (r3), (hint), 1, 0, 0x04)
+#define ia64_ld2_s_inc_hint_pred(code, qp, r1, r2, r3, hint) ia64_m2 ((code), (qp), (r1), (r2), (r3), (hint), 1, 0, 0x05)
+#define ia64_ld4_s_inc_hint_pred(code, qp, r1, r2, r3, hint) ia64_m2 ((code), (qp), (r1), (r2), (r3), (hint), 1, 0, 0x06)
+#define ia64_ld8_s_inc_hint_pred(code, qp, r1, r2, r3, hint) ia64_m2 ((code), (qp), (r1), (r2), (r3), (hint), 1, 0, 0x07)
+
+#define ia64_ld1_a_inc_hint_pred(code, qp, r1, r2, r3, hint) ia64_m2 ((code), (qp), (r1), (r2), (r3), (hint), 1, 0, 0x08)
+#define ia64_ld2_a_inc_hint_pred(code, qp, r1, r2, r3, hint) ia64_m2 ((code), (qp), (r1), (r2), (r3), (hint), 1, 0, 0x09)
+#define ia64_ld4_a_inc_hint_pred(code, qp, r1, r2, r3, hint) ia64_m2 ((code), (qp), (r1), (r2), (r3), (hint), 1, 0, 0x0A)
+#define ia64_ld8_a_inc_hint_pred(code, qp, r1, r2, r3, hint) ia64_m2 ((code), (qp), (r1), (r2), (r3), (hint), 1, 0, 0x0B)
+
+#define ia64_ld1_sa_inc_hint_pred(code, qp, r1, r2, r3, hint) ia64_m2 ((code), (qp), (r1), (r2), (r3), (hint), 1, 0, 0x0C)
+#define ia64_ld2_sa_inc_hint_pred(code, qp, r1, r2, r3, hint) ia64_m2 ((code), (qp), (r1), (r2), (r3), (hint), 1, 0, 0x0D)
+#define ia64_ld4_sa_inc_hint_pred(code, qp, r1, r2, r3, hint) ia64_m2 ((code), (qp), (r1), (r2), (r3), (hint), 1, 0, 0x0E)
+#define ia64_ld8_sa_inc_hint_pred(code, qp, r1, r2, r3, hint) ia64_m2 ((code), (qp), (r1), (r2), (r3), (hint), 1, 0, 0x0F)
+
+#define ia64_ld1_bias_inc_hint_pred(code, qp, r1, r2, r3, hint) ia64_m2 ((code), (qp), (r1), (r2), (r3), (hint), 1, 0, 0x10)
+#define ia64_ld2_bias_inc_hint_pred(code, qp, r1, r2, r3, hint) ia64_m2 ((code), (qp), (r1), (r2), (r3), (hint), 1, 0, 0x11)
+#define ia64_ld4_bias_inc_hint_pred(code, qp, r1, r2, r3, hint) ia64_m2 ((code), (qp), (r1), (r2), (r3), (hint), 1, 0, 0x12)
+#define ia64_ld8_bias_inc_hint_pred(code, qp, r1, r2, r3, hint) ia64_m2 ((code), (qp), (r1), (r2), (r3), (hint), 1, 0, 0x13)
+
+#define ia64_ld1_acq_inc_hint_pred(code, qp, r1, r2, r3, hint) ia64_m2 ((code), (qp), (r1), (r2), (r3), (hint), 1, 0, 0x14)
+#define ia64_ld2_acq_inc_hint_pred(code, qp, r1, r2, r3, hint) ia64_m2 ((code), (qp), (r1), (r2), (r3), (hint), 1, 0, 0x15)
+#define ia64_ld4_acq_inc_hint_pred(code, qp, r1, r2, r3, hint) ia64_m2 ((code), (qp), (r1), (r2), (r3), (hint), 1, 0, 0x16)
+#define ia64_ld8_acq_inc_hint_pred(code, qp, r1, r2, r3, hint) ia64_m2 ((code), (qp), (r1), (r2), (r3), (hint), 1, 0, 0x17)
+
+#define ia64_ld8_fill_inc_hint_pred(code, qp, r1, r2, r3, hint) ia64_m2 ((code), (qp), (r1), (r2), (r3), (hint), 1, 0, 0x1B)
+
+#define ia64_ld1_c_clr_inc_hint_pred(code, qp, r1, r2, r3, hint) ia64_m2 ((code), (qp), (r1), (r2), (r3), (hint), 1, 0, 0x20)
+#define ia64_ld2_c_clr_inc_hint_pred(code, qp, r1, r2, r3, hint) ia64_m2 ((code), (qp), (r1), (r2), (r3), (hint), 1, 0, 0x21)
+#define ia64_ld4_c_clr_inc_hint_pred(code, qp, r1, r2, r3, hint) ia64_m2 ((code), (qp), (r1), (r2), (r3), (hint), 1, 0, 0x22)
+#define ia64_ld8_c_clr_inc_hint_pred(code, qp, r1, r2, r3, hint) ia64_m2 ((code), (qp), (r1), (r2), (r3), (hint), 1, 0, 0x23)
+
+#define ia64_ld1_c_nc_inc_hint_pred(code, qp, r1, r2, r3, hint) ia64_m2 ((code), (qp), (r1), (r2), (r3), (hint), 1, 0, 0x24)
+#define ia64_ld2_c_nc_inc_hint_pred(code, qp, r1, r2, r3, hint) ia64_m2 ((code), (qp), (r1), (r2), (r3), (hint), 1, 0, 0x25)
+#define ia64_ld4_c_nc_inc_hint_pred(code, qp, r1, r2, r3, hint) ia64_m2 ((code), (qp), (r1), (r2), (r3), (hint), 1, 0, 0x26)
+#define ia64_ld8_c_nc_inc_hint_pred(code, qp, r1, r2, r3, hint) ia64_m2 ((code), (qp), (r1), (r2), (r3), (hint), 1, 0, 0x27)
+
+#define ia64_ld1_c_clr_acq_inc_hint_pred(code, qp, r1, r2, r3, hint) ia64_m2 ((code), (qp), (r1), (r2), (r3), (hint), 1, 0, 0x28)
+#define ia64_ld2_c_clr_acq_inc_hint_pred(code, qp, r1, r2, r3, hint) ia64_m2 ((code), (qp), (r1), (r2), (r3), (hint), 1, 0, 0x29)
+#define ia64_ld4_c_clr_acq_inc_hint_pred(code, qp, r1, r2, r3, hint) ia64_m2 ((code), (qp), (r1), (r2), (r3), (hint), 1, 0, 0x2A)
+#define ia64_ld8_c_clr_acq_inc_hint_pred(code, qp, r1, r2, r3, hint) ia64_m2 ((code), (qp), (r1), (r2), (r3), (hint), 1, 0, 0x2B)
+
+#define ia64_m3(code, qp, r1, r3, imm, hint, m, x, x6) do { read_pr ((code), (qp)); write_gr ((code), (r1)); read_gr ((code), (r3)); write_gr ((code), (r3)); check_imm9 ((imm)); ia64_emit_ins_9 ((code), IA64_INS_TYPE_M, (qp), 0, (r1), 6, (imm) & 0x7f, 13, (r3), 20, ((imm) >> 7) & 0x1, 27, (hint), 28, (x6), 30, sign_bit ((imm)), 36, (5), 37); } while (0)
+
+#define ia64_ld1_inc_imm_hint_pred(code, qp, r1, r3, imm, hint) ia64_m3 ((code), (qp), (r1), (r3), (imm), (hint), 1, 0, 0x00)
+#define ia64_ld2_inc_imm_hint_pred(code, qp, r1, r3, imm, hint) ia64_m3 ((code), (qp), (r1), (r3), (imm), (hint), 1, 0, 0x01)
+#define ia64_ld4_inc_imm_hint_pred(code, qp, r1, r3, imm, hint) ia64_m3 ((code), (qp), (r1), (r3), (imm), (hint), 1, 0, 0x02)
+#define ia64_ld8_inc_imm_hint_pred(code, qp, r1, r3, imm, hint) ia64_m3 ((code), (qp), (r1), (r3), (imm), (hint), 1, 0, 0x03)
+
+#define ia64_ld1_s_inc_imm_hint_pred(code, qp, r1, r3, imm, hint) ia64_m3 ((code), (qp), (r1), (r3), (imm), (hint), 1, 0, 0x04)
+#define ia64_ld2_s_inc_imm_hint_pred(code, qp, r1, r3, imm, hint) ia64_m3 ((code), (qp), (r1), (r3), (imm), (hint), 1, 0, 0x05)
+#define ia64_ld4_s_inc_imm_hint_pred(code, qp, r1, r3, imm, hint) ia64_m3 ((code), (qp), (r1), (r3), (imm), (hint), 1, 0, 0x06)
+#define ia64_ld8_s_inc_imm_hint_pred(code, qp, r1, r3, imm, hint) ia64_m3 ((code), (qp), (r1), (r3), (imm), (hint), 1, 0, 0x07)
+
+#define ia64_ld1_a_inc_imm_hint_pred(code, qp, r1, r3, imm, hint) ia64_m3 ((code), (qp), (r1), (r3), (imm), (hint), 1, 0, 0x08)
+#define ia64_ld2_a_inc_imm_hint_pred(code, qp, r1, r3, imm, hint) ia64_m3 ((code), (qp), (r1), (r3), (imm), (hint), 1, 0, 0x09)
+#define ia64_ld4_a_inc_imm_hint_pred(code, qp, r1, r3, imm, hint) ia64_m3 ((code), (qp), (r1), (r3), (imm), (hint), 1, 0, 0x0A)
+#define ia64_ld8_a_inc_imm_hint_pred(code, qp, r1, r3, imm, hint) ia64_m3 ((code), (qp), (r1), (r3), (imm), (hint), 1, 0, 0x0B)
+
+#define ia64_ld1_sa_inc_imm_hint_pred(code, qp, r1, r3, imm, hint) ia64_m3 ((code), (qp), (r1), (r3), (imm), (hint), 1, 0, 0x0C)
+#define ia64_ld2_sa_inc_imm_hint_pred(code, qp, r1, r3, imm, hint) ia64_m3 ((code), (qp), (r1), (r3), (imm), (hint), 1, 0, 0x0D)
+#define ia64_ld4_sa_inc_imm_hint_pred(code, qp, r1, r3, imm, hint) ia64_m3 ((code), (qp), (r1), (r3), (imm), (hint), 1, 0, 0x0E)
+#define ia64_ld8_sa_inc_imm_hint_pred(code, qp, r1, r3, imm, hint) ia64_m3 ((code), (qp), (r1), (r3), (imm), (hint), 1, 0, 0x0F)
+
+#define ia64_ld1_bias_inc_imm_hint_pred(code, qp, r1, r3, imm, hint) ia64_m3 ((code), (qp), (r1), (r3), (imm), (hint), 1, 0, 0x10)
+#define ia64_ld2_bias_inc_imm_hint_pred(code, qp, r1, r3, imm, hint) ia64_m3 ((code), (qp), (r1), (r3), (imm), (hint), 1, 0, 0x11)
+#define ia64_ld4_bias_inc_imm_hint_pred(code, qp, r1, r3, imm, hint) ia64_m3 ((code), (qp), (r1), (r3), (imm), (hint), 1, 0, 0x12)
+#define ia64_ld8_bias_inc_imm_hint_pred(code, qp, r1, r3, imm, hint) ia64_m3 ((code), (qp), (r1), (r3), (imm), (hint), 1, 0, 0x13)
+
+#define ia64_ld1_acq_inc_imm_hint_pred(code, qp, r1, r3, imm, hint) ia64_m3 ((code), (qp), (r1), (r3), (imm), (hint), 1, 0, 0x14)
+#define ia64_ld2_acq_inc_imm_hint_pred(code, qp, r1, r3, imm, hint) ia64_m3 ((code), (qp), (r1), (r3), (imm), (hint), 1, 0, 0x15)
+#define ia64_ld4_acq_inc_imm_hint_pred(code, qp, r1, r3, imm, hint) ia64_m3 ((code), (qp), (r1), (r3), (imm), (hint), 1, 0, 0x16)
+#define ia64_ld8_acq_inc_imm_hint_pred(code, qp, r1, r3, imm, hint) ia64_m3 ((code), (qp), (r1), (r3), (imm), (hint), 1, 0, 0x17)
+
+#define ia64_ld8_fill_inc_imm_hint_pred(code, qp, r1, r3, imm, hint) ia64_m3 ((code), (qp), (r1), (r3), (imm), (hint), 1, 0, 0x1B)
+
+#define ia64_ld1_c_clr_inc_imm_hint_pred(code, qp, r1, r3, imm, hint) ia64_m3 ((code), (qp), (r1), (r3), (imm), (hint), 1, 0, 0x20)
+#define ia64_ld2_c_clr_inc_imm_hint_pred(code, qp, r1, r3, imm, hint) ia64_m3 ((code), (qp), (r1), (r3), (imm), (hint), 1, 0, 0x21)
+#define ia64_ld4_c_clr_inc_imm_hint_pred(code, qp, r1, r3, imm, hint) ia64_m3 ((code), (qp), (r1), (r3), (imm), (hint), 1, 0, 0x22)
+#define ia64_ld8_c_clr_inc_imm_hint_pred(code, qp, r1, r3, imm, hint) ia64_m3 ((code), (qp), (r1), (r3), (imm), (hint), 1, 0, 0x23)
+
+#define ia64_ld1_c_nc_inc_imm_hint_pred(code, qp, r1, r3, imm, hint) ia64_m3 ((code), (qp), (r1), (r3), (imm), (hint), 1, 0, 0x24)
+#define ia64_ld2_c_nc_inc_imm_hint_pred(code, qp, r1, r3, imm, hint) ia64_m3 ((code), (qp), (r1), (r3), (imm), (hint), 1, 0, 0x25)
+#define ia64_ld4_c_nc_inc_imm_hint_pred(code, qp, r1, r3, imm, hint) ia64_m3 ((code), (qp), (r1), (r3), (imm), (hint), 1, 0, 0x26)
+#define ia64_ld8_c_nc_inc_imm_hint_pred(code, qp, r1, r3, imm, hint) ia64_m3 ((code), (qp), (r1), (r3), (imm), (hint), 1, 0, 0x27)
+
+#define ia64_ld1_c_clr_acq_inc_imm_hint_pred(code, qp, r1, r3, imm, hint) ia64_m3 ((code), (qp), (r1), (r3), (imm), (hint), 1, 0, 0x28)
+#define ia64_ld2_c_clr_acq_inc_imm_hint_pred(code, qp, r1, r3, imm, hint) ia64_m3 ((code), (qp), (r1), (r3), (imm), (hint), 1, 0, 0x29)
+#define ia64_ld4_c_clr_acq_inc_imm_hint_pred(code, qp, r1, r3, imm, hint) ia64_m3 ((code), (qp), (r1), (r3), (imm), (hint), 1, 0, 0x2A)
+#define ia64_ld8_c_clr_acq_inc_imm_hint_pred(code, qp, r1, r3, imm, hint) ia64_m3 ((code), (qp), (r1), (r3), (imm), (hint), 1, 0, 0x2B)
+
+/* Pseudo ops */
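+/* These wrappers are the same loads with the hint completer defaulted to
+   IA64_LD_HINT_NONE (0). */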
+
+#define ia64_ld1_pred(code, qp, r1, r3) ia64_ld1_hint_pred (code, qp, r1, r3, 0)
+#define ia64_ld2_pred(code, qp, r1, r3) ia64_ld2_hint_pred (code, qp, r1, r3, 0)
+#define ia64_ld4_pred(code, qp, r1, r3) ia64_ld4_hint_pred (code, qp, r1, r3, 0)
+#define ia64_ld8_pred(code, qp, r1, r3) ia64_ld8_hint_pred (code, qp, r1, r3, 0)
+
+#define ia64_ld1_s_pred(code, qp, r1, r3) ia64_ld1_s_hint_pred (code, qp, r1, r3, 0)
+#define ia64_ld2_s_pred(code, qp, r1, r3) ia64_ld2_s_hint_pred (code, qp, r1, r3, 0)
+#define ia64_ld4_s_pred(code, qp, r1, r3) ia64_ld4_s_hint_pred (code, qp, r1, r3, 0)
+#define ia64_ld8_s_pred(code, qp, r1, r3) ia64_ld8_s_hint_pred (code, qp, r1, r3, 0)
+
+#define ia64_ld1_a_pred(code, qp, r1, r3) ia64_ld1_a_hint_pred (code, qp, r1, r3, 0)
+#define ia64_ld2_a_pred(code, qp, r1, r3) ia64_ld2_a_hint_pred (code, qp, r1, r3, 0)
+#define ia64_ld4_a_pred(code, qp, r1, r3) ia64_ld4_a_hint_pred (code, qp, r1, r3, 0)
+#define ia64_ld8_a_pred(code, qp, r1, r3) ia64_ld8_a_hint_pred (code, qp, r1, r3, 0)
+
+#define ia64_ld1_sa_pred(code, qp, r1, r3) ia64_ld1_sa_hint_pred (code, qp, r1, r3, 0)
+#define ia64_ld2_sa_pred(code, qp, r1, r3) ia64_ld2_sa_hint_pred (code, qp, r1, r3, 0)
+#define ia64_ld4_sa_pred(code, qp, r1, r3) ia64_ld4_sa_hint_pred (code, qp, r1, r3, 0)
+#define ia64_ld8_sa_pred(code, qp, r1, r3) ia64_ld8_sa_hint_pred (code, qp, r1, r3, 0)
+
+#define ia64_ld1_bias_pred(code, qp, r1, r3) ia64_ld1_bias_hint_pred (code, qp, r1, r3, 0)
+#define ia64_ld2_bias_pred(code, qp, r1, r3) ia64_ld2_bias_hint_pred (code, qp, r1, r3, 0)
+#define ia64_ld4_bias_pred(code, qp, r1, r3) ia64_ld4_bias_hint_pred (code, qp, r1, r3, 0)
+#define ia64_ld8_bias_pred(code, qp, r1, r3) ia64_ld8_bias_hint_pred (code, qp, r1, r3, 0)
+
+#define ia64_ld1_acq_pred(code, qp, r1, r3) ia64_ld1_acq_hint_pred (code, qp, r1, r3, 0)
+#define ia64_ld2_acq_pred(code, qp, r1, r3) ia64_ld2_acq_hint_pred (code, qp, r1, r3, 0)
+#define ia64_ld4_acq_pred(code, qp, r1, r3) ia64_ld4_acq_hint_pred (code, qp, r1, r3, 0)
+#define ia64_ld8_acq_pred(code, qp, r1, r3) ia64_ld8_acq_hint_pred (code, qp, r1, r3, 0)
+
+#define ia64_ld8_fill_pred(code, qp, r1, r3) ia64_ld8_fill_hint_pred (code, qp, r1, r3, 0)
+
+#define ia64_ld1_c_clr_pred(code, qp, r1, r3) ia64_ld1_c_clr_hint_pred (code, qp, r1, r3, 0)
+#define ia64_ld2_c_clr_pred(code, qp, r1, r3) ia64_ld2_c_clr_hint_pred (code, qp, r1, r3, 0)
+#define ia64_ld4_c_clr_pred(code, qp, r1, r3) ia64_ld4_c_clr_hint_pred (code, qp, r1, r3, 0)
+#define ia64_ld8_c_clr_pred(code, qp, r1, r3) ia64_ld8_c_clr_hint_pred (code, qp, r1, r3, 0)
+
+#define ia64_ld1_c_nc_pred(code, qp, r1, r3) ia64_ld1_c_nc_hint_pred (code, qp, r1, r3, 0)
+#define ia64_ld2_c_nc_pred(code, qp, r1, r3) ia64_ld2_c_nc_hint_pred (code, qp, r1, r3, 0)
+#define ia64_ld4_c_nc_pred(code, qp, r1, r3) ia64_ld4_c_nc_hint_pred (code, qp, r1, r3, 0)
+#define ia64_ld8_c_nc_pred(code, qp, r1, r3) ia64_ld8_c_nc_hint_pred (code, qp, r1, r3, 0)
+
+#define ia64_ld1_c_clr_acq_pred(code, qp, r1, r3) ia64_ld1_c_clr_acq_hint_pred (code, qp, r1, r3, 0)
+#define ia64_ld2_c_clr_acq_pred(code, qp, r1, r3) ia64_ld2_c_clr_acq_hint_pred (code, qp, r1, r3, 0)
+#define ia64_ld4_c_clr_acq_pred(code, qp, r1, r3) ia64_ld4_c_clr_acq_hint_pred (code, qp, r1, r3, 0)
+#define ia64_ld8_c_clr_acq_pred(code, qp, r1, r3) ia64_ld8_c_clr_acq_hint_pred (code, qp, r1, r3, 0)
+
+#define ia64_ld16_pred(code, qp, r1, r3) ia64_ld16_hint_pred (code, qp, r1, r3, 0)
+#define ia64_ld16_acq_pred(code, qp, r1, r3) ia64_ld16_acq_hint_pred (code, qp, r1, r3, 0)
+
+#define ia64_ld1_inc_pred(code, qp, r1, r2, r3) ia64_ld1_inc_hint_pred (code, qp, r1, r2, r3, 0)
+#define ia64_ld2_inc_pred(code, qp, r1, r2, r3) ia64_ld2_inc_hint_pred (code, qp, r1, r2, r3, 0)
+#define ia64_ld4_inc_pred(code, qp, r1, r2, r3) ia64_ld4_inc_hint_pred (code, qp, r1, r2, r3, 0)
+#define ia64_ld8_inc_pred(code, qp, r1, r2, r3) ia64_ld8_inc_hint_pred (code, qp, r1, r2, r3, 0)
+
+#define ia64_ld1_s_inc_pred(code, qp, r1, r2, r3) ia64_ld1_s_inc_hint_pred (code, qp, r1, r2, r3, 0)
+#define ia64_ld2_s_inc_pred(code, qp, r1, r2, r3) ia64_ld2_s_inc_hint_pred (code, qp, r1, r2, r3, 0)
+#define ia64_ld4_s_inc_pred(code, qp, r1, r2, r3) ia64_ld4_s_inc_hint_pred (code, qp, r1, r2, r3, 0)
+#define ia64_ld8_s_inc_pred(code, qp, r1, r2, r3) ia64_ld8_s_inc_hint_pred (code, qp, r1, r2, r3, 0)
+
+#define ia64_ld1_a_inc_pred(code, qp, r1, r2, r3) ia64_ld1_a_inc_hint_pred (code, qp, r1, r2, r3, 0)
+#define ia64_ld2_a_inc_pred(code, qp, r1, r2, r3) ia64_ld2_a_inc_hint_pred (code, qp, r1, r2, r3, 0)
+#define ia64_ld4_a_inc_pred(code, qp, r1, r2, r3) ia64_ld4_a_inc_hint_pred (code, qp, r1, r2, r3, 0)
+#define ia64_ld8_a_inc_pred(code, qp, r1, r2, r3) ia64_ld8_a_inc_hint_pred (code, qp, r1, r2, r3, 0)
+
+#define ia64_ld1_sa_inc_pred(code, qp, r1, r2, r3) ia64_ld1_sa_inc_hint_pred (code, qp, r1, r2, r3, 0)
+#define ia64_ld2_sa_inc_pred(code, qp, r1, r2, r3) ia64_ld2_sa_inc_hint_pred (code, qp, r1, r2, r3, 0)
+#define ia64_ld4_sa_inc_pred(code, qp, r1, r2, r3) ia64_ld4_sa_inc_hint_pred (code, qp, r1, r2, r3, 0)
+#define ia64_ld8_sa_inc_pred(code, qp, r1, r2, r3) ia64_ld8_sa_inc_hint_pred (code, qp, r1, r2, r3, 0)
+
+#define ia64_ld1_bias_inc_pred(code, qp, r1, r2, r3) ia64_ld1_bias_inc_hint_pred (code, qp, r1, r2, r3, 0)
+#define ia64_ld2_bias_inc_pred(code, qp, r1, r2, r3) ia64_ld2_bias_inc_hint_pred (code, qp, r1, r2, r3, 0)
+#define ia64_ld4_bias_inc_pred(code, qp, r1, r2, r3) ia64_ld4_bias_inc_hint_pred (code, qp, r1, r2, r3, 0)
+#define ia64_ld8_bias_inc_pred(code, qp, r1, r2, r3) ia64_ld8_bias_inc_hint_pred (code, qp, r1, r2, r3, 0)
+
+#define ia64_ld1_acq_inc_pred(code, qp, r1, r2, r3) ia64_ld1_acq_inc_hint_pred (code, qp, r1, r2, r3, 0)
+#define ia64_ld2_acq_inc_pred(code, qp, r1, r2, r3) ia64_ld2_acq_inc_hint_pred (code, qp, r1, r2, r3, 0)
+#define ia64_ld4_acq_inc_pred(code, qp, r1, r2, r3) ia64_ld4_acq_inc_hint_pred (code, qp, r1, r2, r3, 0)
+#define ia64_ld8_acq_inc_pred(code, qp, r1, r2, r3) ia64_ld8_acq_inc_hint_pred (code, qp, r1, r2, r3, 0)
+
+#define ia64_ld8_fill_inc_pred(code, qp, r1, r2, r3) ia64_ld8_fill_inc_hint_pred (code, qp, r1, r2, r3, 0)
+
+#define ia64_ld1_c_clr_inc_pred(code, qp, r1, r2, r3) ia64_ld1_c_clr_inc_hint_pred (code, qp, r1, r2, r3, 0)
+#define ia64_ld2_c_clr_inc_pred(code, qp, r1, r2, r3) ia64_ld2_c_clr_inc_hint_pred (code, qp, r1, r2, r3, 0)
+#define ia64_ld4_c_clr_inc_pred(code, qp, r1, r2, r3) ia64_ld4_c_clr_inc_hint_pred (code, qp, r1, r2, r3, 0)
+#define ia64_ld8_c_clr_inc_pred(code, qp, r1, r2, r3) ia64_ld8_c_clr_inc_hint_pred (code, qp, r1, r2, r3, 0)
+
+#define ia64_ld1_c_nc_inc_pred(code, qp, r1, r2, r3) ia64_ld1_c_nc_inc_hint_pred (code, qp, r1, r2, r3, 0)
+#define ia64_ld2_c_nc_inc_pred(code, qp, r1, r2, r3) ia64_ld2_c_nc_inc_hint_pred (code, qp, r1, r2, r3, 0)
+#define ia64_ld4_c_nc_inc_pred(code, qp, r1, r2, r3) ia64_ld4_c_nc_inc_hint_pred (code, qp, r1, r2, r3, 0)
+#define ia64_ld8_c_nc_inc_pred(code, qp, r1, r2, r3) ia64_ld8_c_nc_inc_hint_pred (code, qp, r1, r2, r3, 0)
+
+#define ia64_ld1_c_clr_acq_inc_pred(code, qp, r1, r2, r3) ia64_ld1_c_clr_acq_inc_hint_pred (code, qp, r1, r2, r3, 0)
+#define ia64_ld2_c_clr_acq_inc_pred(code, qp, r1, r2, r3) ia64_ld2_c_clr_acq_inc_hint_pred (code, qp, r1, r2, r3, 0)
+#define ia64_ld4_c_clr_acq_inc_pred(code, qp, r1, r2, r3) ia64_ld4_c_clr_acq_inc_hint_pred (code, qp, r1, r2, r3, 0)
+#define ia64_ld8_c_clr_acq_inc_pred(code, qp, r1, r2, r3) ia64_ld8_c_clr_acq_inc_hint_pred (code, qp, r1, r2, r3, 0)
+
+#define ia64_ld1_inc_imm_pred(code, qp, r1, r3, imm) ia64_ld1_inc_imm_hint_pred (code, qp, r1, r3, imm, 0)
+#define ia64_ld2_inc_imm_pred(code, qp, r1, r3, imm) ia64_ld2_inc_imm_hint_pred (code, qp, r1, r3, imm, 0)
+#define ia64_ld4_inc_imm_pred(code, qp, r1, r3, imm) ia64_ld4_inc_imm_hint_pred (code, qp, r1, r3, imm, 0)
+#define ia64_ld8_inc_imm_pred(code, qp, r1, r3, imm) ia64_ld8_inc_imm_hint_pred (code, qp, r1, r3, imm, 0)
+
+#define ia64_ld1_s_inc_imm_pred(code, qp, r1, r3, imm) ia64_ld1_s_inc_imm_hint_pred (code, qp, r1, r3, imm, 0)
+#define ia64_ld2_s_inc_imm_pred(code, qp, r1, r3, imm) ia64_ld2_s_inc_imm_hint_pred (code, qp, r1, r3, imm, 0)
+#define ia64_ld4_s_inc_imm_pred(code, qp, r1, r3, imm) ia64_ld4_s_inc_imm_hint_pred (code, qp, r1, r3, imm, 0)
+#define ia64_ld8_s_inc_imm_pred(code, qp, r1, r3, imm) ia64_ld8_s_inc_imm_hint_pred (code, qp, r1, r3, imm, 0)
+
+#define ia64_ld1_a_inc_imm_pred(code, qp, r1, r3, imm) ia64_ld1_a_inc_imm_hint_pred (code, qp, r1, r3, imm, 0)
+#define ia64_ld2_a_inc_imm_pred(code, qp, r1, r3, imm) ia64_ld2_a_inc_imm_hint_pred (code, qp, r1, r3, imm, 0)
+#define ia64_ld4_a_inc_imm_pred(code, qp, r1, r3, imm) ia64_ld4_a_inc_imm_hint_pred (code, qp, r1, r3, imm, 0)
+#define ia64_ld8_a_inc_imm_pred(code, qp, r1, r3, imm) ia64_ld8_a_inc_imm_hint_pred (code, qp, r1, r3, imm, 0)
+
+#define ia64_ld1_sa_inc_imm_pred(code, qp, r1, r3, imm) ia64_ld1_sa_inc_imm_hint_pred (code, qp, r1, r3, imm, 0)
+#define ia64_ld2_sa_inc_imm_pred(code, qp, r1, r3, imm) ia64_ld2_sa_inc_imm_hint_pred (code, qp, r1, r3, imm, 0)
+#define ia64_ld4_sa_inc_imm_pred(code, qp, r1, r3, imm) ia64_ld4_sa_inc_imm_hint_pred (code, qp, r1, r3, imm, 0)
+#define ia64_ld8_sa_inc_imm_pred(code, qp, r1, r3, imm) ia64_ld8_sa_inc_imm_hint_pred (code, qp, r1, r3, imm, 0)
+
+#define ia64_ld1_bias_inc_imm_pred(code, qp, r1, r3, imm) ia64_ld1_bias_inc_imm_hint_pred (code, qp, r1, r3, imm, 0)
+#define ia64_ld2_bias_inc_imm_pred(code, qp, r1, r3, imm) ia64_ld2_bias_inc_imm_hint_pred (code, qp, r1, r3, imm, 0)
+#define ia64_ld4_bias_inc_imm_pred(code, qp, r1, r3, imm) ia64_ld4_bias_inc_imm_hint_pred (code, qp, r1, r3, imm, 0)
+#define ia64_ld8_bias_inc_imm_pred(code, qp, r1, r3, imm) ia64_ld8_bias_inc_imm_hint_pred (code, qp, r1, r3, imm, 0)
+
+#define ia64_ld1_acq_inc_imm_pred(code, qp, r1, r3, imm) ia64_ld1_acq_inc_imm_hint_pred (code, qp, r1, r3, imm, 0)
+#define ia64_ld2_acq_inc_imm_pred(code, qp, r1, r3, imm) ia64_ld2_acq_inc_imm_hint_pred (code, qp, r1, r3, imm, 0)
+#define ia64_ld4_acq_inc_imm_pred(code, qp, r1, r3, imm) ia64_ld4_acq_inc_imm_hint_pred (code, qp, r1, r3, imm, 0)
+#define ia64_ld8_acq_inc_imm_pred(code, qp, r1, r3, imm) ia64_ld8_acq_inc_imm_hint_pred (code, qp, r1, r3, imm, 0)
+
+#define ia64_ld8_fill_inc_imm_pred(code, qp, r1, r3, imm) ia64_ld8_fill_inc_imm_hint_pred (code, qp, r1, r3, imm, 0)
+
+#define ia64_ld1_c_clr_inc_imm_pred(code, qp, r1, r3, imm) ia64_ld1_c_clr_inc_imm_hint_pred (code, qp, r1, r3, imm, 0)
+#define ia64_ld2_c_clr_inc_imm_pred(code, qp, r1, r3, imm) ia64_ld2_c_clr_inc_imm_hint_pred (code, qp, r1, r3, imm, 0)
+#define ia64_ld4_c_clr_inc_imm_pred(code, qp, r1, r3, imm) ia64_ld4_c_clr_inc_imm_hint_pred (code, qp, r1, r3, imm, 0)
+#define ia64_ld8_c_clr_inc_imm_pred(code, qp, r1, r3, imm) ia64_ld8_c_clr_inc_imm_hint_pred (code, qp, r1, r3, imm, 0)
+
+#define ia64_ld1_c_nc_inc_imm_pred(code, qp, r1, r3, imm) ia64_ld1_c_nc_inc_imm_hint_pred (code, qp, r1, r3, imm, 0)
+#define ia64_ld2_c_nc_inc_imm_pred(code, qp, r1, r3, imm) ia64_ld2_c_nc_inc_imm_hint_pred (code, qp, r1, r3, imm, 0)
+#define ia64_ld4_c_nc_inc_imm_pred(code, qp, r1, r3, imm) ia64_ld4_c_nc_inc_imm_hint_pred (code, qp, r1, r3, imm, 0)
+#define ia64_ld8_c_nc_inc_imm_pred(code, qp, r1, r3, imm) ia64_ld8_c_nc_inc_imm_hint_pred (code, qp, r1, r3, imm, 0)
+
+#define ia64_ld1_c_clr_acq_inc_imm_pred(code, qp, r1, r3, imm) ia64_ld1_c_clr_acq_inc_imm_hint_pred (code, qp, r1, r3, imm, 0)
+#define ia64_ld2_c_clr_acq_inc_imm_pred(code, qp, r1, r3, imm) ia64_ld2_c_clr_acq_inc_imm_hint_pred (code, qp, r1, r3, imm, 0)
+#define ia64_ld4_c_clr_acq_inc_imm_pred(code, qp, r1, r3, imm) ia64_ld4_c_clr_acq_inc_imm_hint_pred (code, qp, r1, r3, imm, 0)
+#define ia64_ld8_c_clr_acq_inc_imm_pred(code, qp, r1, r3, imm) ia64_ld8_c_clr_acq_inc_imm_hint_pred (code, qp, r1, r3, imm, 0)
+
+/* End of pseudo ops */
+
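+/*
+ * Illustrative usage sketch, not part of the original header: the _pred
+ * pseudo-ops above are plain wrappers that default the locality hint to 0
+ * (".none").  Assumes the Ia64CodegenState emission state and the basic
+ * ia64_ld*_pred wrappers defined earlier in this file.
+ */
+#if 0
+static void
+emit_load_examples (Ia64CodegenState code)
+{
+	/* (p0) ld8 r8 = [r9]  --  unconditional 8-byte load, no hint */
+	ia64_ld8_pred (code, 0, 8, 9);
+	/* (p6) ld4.s r10 = [r11], 4  --  speculative load, post-increment by 4 */
+	ia64_ld4_s_inc_imm_pred (code, 6, 10, 11, 4);
+}
+#endif
+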
+#define ia64_m4(code, qp, r3, r2, hint, m, x, x6) do { read_pr ((code), (qp)); read_gr ((code), (r3)); read_gr ((code), (r2)); ia64_emit_ins_8 ((code), IA64_INS_TYPE_M, (qp), 0, (r2), 13, (r3), 20, (x), 27, (hint), 28, (x6), 30, (m), 36, (4), 37); } while (0)
+
+#define ia64_st1_hint_pred(code, qp, r3, r2, hint) ia64_m4 ((code), (qp), (r3), (r2), (hint), 0, 0, 0x30)
+#define ia64_st2_hint_pred(code, qp, r3, r2, hint) ia64_m4 ((code), (qp), (r3), (r2), (hint), 0, 0, 0x31)
+#define ia64_st4_hint_pred(code, qp, r3, r2, hint) ia64_m4 ((code), (qp), (r3), (r2), (hint), 0, 0, 0x32)
+#define ia64_st8_hint_pred(code, qp, r3, r2, hint) ia64_m4 ((code), (qp), (r3), (r2), (hint), 0, 0, 0x33)
+
+/* Pseudo ops */
+
+#define ia64_st8_pred(code, qp, r3, r2) ia64_st8_hint_pred ((code), (qp), (r3), (r2), 0)
+
+/* End of pseudo ops */
+
+#define ia64_st1_rel_hint_pred(code, qp, r3, r2, hint) ia64_m4 ((code), (qp), (r3), (r2), (hint), 0, 0, 0x34)
+#define ia64_st2_rel_hint_pred(code, qp, r3, r2, hint) ia64_m4 ((code), (qp), (r3), (r2), (hint), 0, 0, 0x35)
+#define ia64_st4_rel_hint_pred(code, qp, r3, r2, hint) ia64_m4 ((code), (qp), (r3), (r2), (hint), 0, 0, 0x36)
+#define ia64_st8_rel_hint_pred(code, qp, r3, r2, hint) ia64_m4 ((code), (qp), (r3), (r2), (hint), 0, 0, 0x37)
+
+#define ia64_st8_spill_hint_pred(code, qp, r3, r2, hint) ia64_m4 ((code), (qp), (r3), (r2), (hint), 0, 0, 0x3B)
+
+#define ia64_st16_hint_pred(code, qp, r3, r2, hint) ia64_m4 ((code), (qp), (r3), (r2), (hint), 0, 1, 0x30)
+#define ia64_st16_rel_hint_pred(code, qp, r3, r2, hint) ia64_m4 ((code), (qp), (r3), (r2), (hint), 0, 1, 0x34)
+
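+/*
+ * Sketch (illustrative only): the st*_rel forms give store-release
+ * ordering, which is what a lock-exit path needs; the plain st* forms are
+ * unordered.  Register r0 reads as a hardwired zero.
+ */
+#if 0
+static void
+emit_unlock (Ia64CodegenState code, int lock_ptr_reg)
+{
+	/* st4.rel [lock_ptr_reg] = r0  --  publish the zero with release semantics */
+	ia64_st4_rel_hint_pred (code, 0, lock_ptr_reg, 0, 0);
+}
+#endif
+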
+#define ia64_m5(code, qp, r3, r2, imm, hint, m, x, x6) do { read_pr ((code), (qp)); read_gr ((code), (r3)); write_gr ((code), (r3)); read_gr ((code), (r2)); check_imm9 ((imm)); ia64_emit_ins_9 ((code), IA64_INS_TYPE_M, (qp), 0, (imm) & 0x7f, 6, (r2), 13, (r3), 20, ((imm) >> 7) & 0x1, 27, (hint), 28, (x6), 30, sign_bit ((imm)), 36, (5), 37); } while (0)
+
+#define ia64_st1_inc_imm_hint_pred(code, qp, r3, r2, imm, hint) ia64_m5 ((code), (qp), (r3), (r2), (imm), (hint), 0, 0, 0x30)
+#define ia64_st2_inc_imm_hint_pred(code, qp, r3, r2, imm, hint) ia64_m5 ((code), (qp), (r3), (r2), (imm), (hint), 0, 0, 0x31)
+#define ia64_st4_inc_imm_hint_pred(code, qp, r3, r2, imm, hint) ia64_m5 ((code), (qp), (r3), (r2), (imm), (hint), 0, 0, 0x32)
+#define ia64_st8_inc_imm_hint_pred(code, qp, r3, r2, imm, hint) ia64_m5 ((code), (qp), (r3), (r2), (imm), (hint), 0, 0, 0x33)
+
+#define ia64_st1_rel_inc_imm_hint_pred(code, qp, r3, r2, imm, hint) ia64_m5 ((code), (qp), (r3), (r2), (imm), (hint), 0, 0, 0x34)
+#define ia64_st2_rel_inc_imm_hint_pred(code, qp, r3, r2, imm, hint) ia64_m5 ((code), (qp), (r3), (r2), (imm), (hint), 0, 0, 0x35)
+#define ia64_st4_rel_inc_imm_hint_pred(code, qp, r3, r2, imm, hint) ia64_m5 ((code), (qp), (r3), (r2), (imm), (hint), 0, 0, 0x36)
+#define ia64_st8_rel_inc_imm_hint_pred(code, qp, r3, r2, imm, hint) ia64_m5 ((code), (qp), (r3), (r2), (imm), (hint), 0, 0, 0x37)
+
+#define ia64_st8_spill_inc_imm_hint_pred(code, qp, r3, r2, imm, hint) ia64_m5 ((code), (qp), (r3), (r2), (imm), (hint), 0, 0, 0x3B)
+
+#define ia64_m6(code, qp, f1, r3, hint, m, x, x6) do { read_pr ((code), (qp)); write_fr ((code), (f1)); read_gr ((code), (r3)); ia64_emit_ins_8 ((code), IA64_INS_TYPE_M, (qp), 0, (f1), 6, (r3), 20, (x), 27, (hint), 28, (x6), 30, (m), 36, (6), 37); } while (0)
+
+#define ia64_ldfs_hint_pred(code, qp, f1, r3, hint) ia64_m6 ((code), (qp), (f1), (r3), (hint), 0, 0, 0x02)
+#define ia64_ldfd_hint_pred(code, qp, f1, r3, hint) ia64_m6 ((code), (qp), (f1), (r3), (hint), 0, 0, 0x03)
+#define ia64_ldf8_hint_pred(code, qp, f1, r3, hint) ia64_m6 ((code), (qp), (f1), (r3), (hint), 0, 0, 0x01)
+#define ia64_ldfe_hint_pred(code, qp, f1, r3, hint) ia64_m6 ((code), (qp), (f1), (r3), (hint), 0, 0, 0x00)
+
+#define ia64_ldfs_s_hint_pred(code, qp, f1, r3, hint) ia64_m6 ((code), (qp), (f1), (r3), (hint), 0, 0, 0x06)
+#define ia64_ldfd_s_hint_pred(code, qp, f1, r3, hint) ia64_m6 ((code), (qp), (f1), (r3), (hint), 0, 0, 0x07)
+#define ia64_ldf8_s_hint_pred(code, qp, f1, r3, hint) ia64_m6 ((code), (qp), (f1), (r3), (hint), 0, 0, 0x05)
+#define ia64_ldfe_s_hint_pred(code, qp, f1, r3, hint) ia64_m6 ((code), (qp), (f1), (r3), (hint), 0, 0, 0x04)
+
+#define ia64_ldfs_a_hint_pred(code, qp, f1, r3, hint) ia64_m6 ((code), (qp), (f1), (r3), (hint), 0, 0, 0x0A)
+#define ia64_ldfd_a_hint_pred(code, qp, f1, r3, hint) ia64_m6 ((code), (qp), (f1), (r3), (hint), 0, 0, 0x0B)
+#define ia64_ldf8_a_hint_pred(code, qp, f1, r3, hint) ia64_m6 ((code), (qp), (f1), (r3), (hint), 0, 0, 0x09)
+#define ia64_ldfe_a_hint_pred(code, qp, f1, r3, hint) ia64_m6 ((code), (qp), (f1), (r3), (hint), 0, 0, 0x08)
+
+#define ia64_ldfs_sa_hint_pred(code, qp, f1, r3, hint) ia64_m6 ((code), (qp), (f1), (r3), (hint), 0, 0, 0x0E)
+#define ia64_ldfd_sa_hint_pred(code, qp, f1, r3, hint) ia64_m6 ((code), (qp), (f1), (r3), (hint), 0, 0, 0x0F)
+#define ia64_ldf8_sa_hint_pred(code, qp, f1, r3, hint) ia64_m6 ((code), (qp), (f1), (r3), (hint), 0, 0, 0x0D)
+#define ia64_ldfe_sa_hint_pred(code, qp, f1, r3, hint) ia64_m6 ((code), (qp), (f1), (r3), (hint), 0, 0, 0x0C)
+
+#define ia64_ldfs_c_clr_hint_pred(code, qp, f1, r3, hint) ia64_m6 ((code), (qp), (f1), (r3), (hint), 0, 0, 0x22)
+#define ia64_ldfd_c_clr_hint_pred(code, qp, f1, r3, hint) ia64_m6 ((code), (qp), (f1), (r3), (hint), 0, 0, 0x23)
+#define ia64_ldf8_c_clr_hint_pred(code, qp, f1, r3, hint) ia64_m6 ((code), (qp), (f1), (r3), (hint), 0, 0, 0x21)
+#define ia64_ldfe_c_clr_hint_pred(code, qp, f1, r3, hint) ia64_m6 ((code), (qp), (f1), (r3), (hint), 0, 0, 0x20)
+
+#define ia64_ldfs_c_nc_hint_pred(code, qp, f1, r3, hint) ia64_m6 ((code), (qp), (f1), (r3), (hint), 0, 0, 0x26)
+#define ia64_ldfd_c_nc_hint_pred(code, qp, f1, r3, hint) ia64_m6 ((code), (qp), (f1), (r3), (hint), 0, 0, 0x27)
+#define ia64_ldf8_c_nc_hint_pred(code, qp, f1, r3, hint) ia64_m6 ((code), (qp), (f1), (r3), (hint), 0, 0, 0x25)
+#define ia64_ldfe_c_nc_hint_pred(code, qp, f1, r3, hint) ia64_m6 ((code), (qp), (f1), (r3), (hint), 0, 0, 0x24)
+
+#define ia64_ldf_fill_hint_pred(code, qp, f1, r3, hint) ia64_m6 ((code), (qp), (f1), (r3), (hint), 0, 0, 0x1B)
+
+#define ia64_m7(code, qp, f1, r3, r2, hint, m, x, x6) do { read_pr ((code), (qp)); write_fr ((code), (f1)); read_gr ((code), (r3)); write_gr ((code), (r3)); read_gr ((code), (r2)); ia64_emit_ins_9 ((code), IA64_INS_TYPE_M, (qp), 0, (f1), 6, (r2), 13, (r3), 20, (x), 27, (hint), 28, (x6), 30, (m), 36, (6), 37); } while (0)
+
+#define ia64_ldfs_inc_hint_pred(code, qp, f1, r3, r2, hint) ia64_m7 ((code), (qp), (f1), (r3), (r2), (hint), 1, 0, 0x02)
+#define ia64_ldfd_inc_hint_pred(code, qp, f1, r3, r2, hint) ia64_m7 ((code), (qp), (f1), (r3), (r2), (hint), 1, 0, 0x03)
+#define ia64_ldf8_inc_hint_pred(code, qp, f1, r3, r2, hint) ia64_m7 ((code), (qp), (f1), (r3), (r2), (hint), 1, 0, 0x01)
+#define ia64_ldfe_inc_hint_pred(code, qp, f1, r3, r2, hint) ia64_m7 ((code), (qp), (f1), (r3), (r2), (hint), 1, 0, 0x00)
+
+#define ia64_ldfs_s_inc_hint_pred(code, qp, f1, r3, r2, hint) ia64_m7 ((code), (qp), (f1), (r3), (r2), (hint), 1, 0, 0x06)
+#define ia64_ldfd_s_inc_hint_pred(code, qp, f1, r3, r2, hint) ia64_m7 ((code), (qp), (f1), (r3), (r2), (hint), 1, 0, 0x07)
+#define ia64_ldf8_s_inc_hint_pred(code, qp, f1, r3, r2, hint) ia64_m7 ((code), (qp), (f1), (r3), (r2), (hint), 1, 0, 0x05)
+#define ia64_ldfe_s_inc_hint_pred(code, qp, f1, r3, r2, hint) ia64_m7 ((code), (qp), (f1), (r3), (r2), (hint), 1, 0, 0x04)
+
+#define ia64_ldfs_a_inc_hint_pred(code, qp, f1, r3, r2, hint) ia64_m7 ((code), (qp), (f1), (r3), (r2), (hint), 1, 0, 0x0A)
+#define ia64_ldfd_a_inc_hint_pred(code, qp, f1, r3, r2, hint) ia64_m7 ((code), (qp), (f1), (r3), (r2), (hint), 1, 0, 0x0B)
+#define ia64_ldf8_a_inc_hint_pred(code, qp, f1, r3, r2, hint) ia64_m7 ((code), (qp), (f1), (r3), (r2), (hint), 1, 0, 0x09)
+#define ia64_ldfe_a_inc_hint_pred(code, qp, f1, r3, r2, hint) ia64_m7 ((code), (qp), (f1), (r3), (r2), (hint), 1, 0, 0x08)
+
+#define ia64_ldfs_sa_inc_hint_pred(code, qp, f1, r3, r2, hint) ia64_m7 ((code), (qp), (f1), (r3), (r2), (hint), 1, 0, 0x0E)
+#define ia64_ldfd_sa_inc_hint_pred(code, qp, f1, r3, r2, hint) ia64_m7 ((code), (qp), (f1), (r3), (r2), (hint), 1, 0, 0x0F)
+#define ia64_ldf8_sa_inc_hint_pred(code, qp, f1, r3, r2, hint) ia64_m7 ((code), (qp), (f1), (r3), (r2), (hint), 1, 0, 0x0D)
+#define ia64_ldfe_sa_inc_hint_pred(code, qp, f1, r3, r2, hint) ia64_m7 ((code), (qp), (f1), (r3), (r2), (hint), 1, 0, 0x0C)
+
+#define ia64_ldfs_c_clr_inc_hint_pred(code, qp, f1, r3, r2, hint) ia64_m7 ((code), (qp), (f1), (r3), (r2), (hint), 1, 0, 0x22)
+#define ia64_ldfd_c_clr_inc_hint_pred(code, qp, f1, r3, r2, hint) ia64_m7 ((code), (qp), (f1), (r3), (r2), (hint), 1, 0, 0x23)
+#define ia64_ldf8_c_clr_inc_hint_pred(code, qp, f1, r3, r2, hint) ia64_m7 ((code), (qp), (f1), (r3), (r2), (hint), 1, 0, 0x21)
+#define ia64_ldfe_c_clr_inc_hint_pred(code, qp, f1, r3, r2, hint) ia64_m7 ((code), (qp), (f1), (r3), (r2), (hint), 1, 0, 0x20)
+
+#define ia64_ldfs_c_nc_inc_hint_pred(code, qp, f1, r3, r2, hint) ia64_m7 ((code), (qp), (f1), (r3), (r2), (hint), 1, 0, 0x26)
+#define ia64_ldfd_c_nc_inc_hint_pred(code, qp, f1, r3, r2, hint) ia64_m7 ((code), (qp), (f1), (r3), (r2), (hint), 1, 0, 0x27)
+#define ia64_ldf8_c_nc_inc_hint_pred(code, qp, f1, r3, r2, hint) ia64_m7 ((code), (qp), (f1), (r3), (r2), (hint), 1, 0, 0x25)
+#define ia64_ldfe_c_nc_inc_hint_pred(code, qp, f1, r3, r2, hint) ia64_m7 ((code), (qp), (f1), (r3), (r2), (hint), 1, 0, 0x24)
+
+#define ia64_ldf_fill_inc_hint_pred(code, qp, f1, r3, r2, hint) ia64_m7 ((code), (qp), (f1), (r3), (r2), (hint), 1, 0, 0x1B)
+
+#define ia64_m8(code, qp, f1, r3, imm, hint, x6) do { read_pr ((code), (qp)); write_fr ((code), (f1)); read_gr ((code), (r3)); write_gr ((code), (r3)); check_imm9 ((imm)); ia64_emit_ins_9 ((code), IA64_INS_TYPE_M, (qp), 0, (f1), 6, (imm) & 0x7f, 13, (r3), 20, ((imm) >> 7) & 0x1, 27, (hint), 28, (x6), 30, sign_bit ((imm)), 36, (7), 37); } while (0)
+
+#define ia64_ldfs_inc_imm_hint_pred(code, qp, f1, r3, imm, hint) ia64_m8 ((code), (qp), (f1), (r3), (imm), (hint), 0x02)
+#define ia64_ldfd_inc_imm_hint_pred(code, qp, f1, r3, imm, hint) ia64_m8 ((code), (qp), (f1), (r3), (imm), (hint), 0x03)
+#define ia64_ldf8_inc_imm_hint_pred(code, qp, f1, r3, imm, hint) ia64_m8 ((code), (qp), (f1), (r3), (imm), (hint), 0x01)
+#define ia64_ldfe_inc_imm_hint_pred(code, qp, f1, r3, imm, hint) ia64_m8 ((code), (qp), (f1), (r3), (imm), (hint), 0x00)
+
+#define ia64_ldfs_s_inc_imm_hint_pred(code, qp, f1, r3, imm, hint) ia64_m8 ((code), (qp), (f1), (r3), (imm), (hint), 0x06)
+#define ia64_ldfd_s_inc_imm_hint_pred(code, qp, f1, r3, imm, hint) ia64_m8 ((code), (qp), (f1), (r3), (imm), (hint), 0x07)
+#define ia64_ldf8_s_inc_imm_hint_pred(code, qp, f1, r3, imm, hint) ia64_m8 ((code), (qp), (f1), (r3), (imm), (hint), 0x05)
+#define ia64_ldfe_s_inc_imm_hint_pred(code, qp, f1, r3, imm, hint) ia64_m8 ((code), (qp), (f1), (r3), (imm), (hint), 0x04)
+
+#define ia64_ldfs_a_inc_imm_hint_pred(code, qp, f1, r3, imm, hint) ia64_m8 ((code), (qp), (f1), (r3), (imm), (hint), 0x0A)
+#define ia64_ldfd_a_inc_imm_hint_pred(code, qp, f1, r3, imm, hint) ia64_m8 ((code), (qp), (f1), (r3), (imm), (hint), 0x0B)
+#define ia64_ldf8_a_inc_imm_hint_pred(code, qp, f1, r3, imm, hint) ia64_m8 ((code), (qp), (f1), (r3), (imm), (hint), 0x09)
+#define ia64_ldfe_a_inc_imm_hint_pred(code, qp, f1, r3, imm, hint) ia64_m8 ((code), (qp), (f1), (r3), (imm), (hint), 0x08)
+
+#define ia64_ldfs_sa_inc_imm_hint_pred(code, qp, f1, r3, imm, hint) ia64_m8 ((code), (qp), (f1), (r3), (imm), (hint), 0x0E)
+#define ia64_ldfd_sa_inc_imm_hint_pred(code, qp, f1, r3, imm, hint) ia64_m8 ((code), (qp), (f1), (r3), (imm), (hint), 0x0F)
+#define ia64_ldf8_sa_inc_imm_hint_pred(code, qp, f1, r3, imm, hint) ia64_m8 ((code), (qp), (f1), (r3), (imm), (hint), 0x0D)
+#define ia64_ldfe_sa_inc_imm_hint_pred(code, qp, f1, r3, imm, hint) ia64_m8 ((code), (qp), (f1), (r3), (imm), (hint), 0x0C)
+
+#define ia64_ldfs_c_clr_inc_imm_hint_pred(code, qp, f1, r3, imm, hint) ia64_m8 ((code), (qp), (f1), (r3), (imm), (hint), 0x22)
+#define ia64_ldfd_c_clr_inc_imm_hint_pred(code, qp, f1, r3, imm, hint) ia64_m8 ((code), (qp), (f1), (r3), (imm), (hint), 0x23)
+#define ia64_ldf8_c_clr_inc_imm_hint_pred(code, qp, f1, r3, imm, hint) ia64_m8 ((code), (qp), (f1), (r3), (imm), (hint), 0x21)
+#define ia64_ldfe_c_clr_inc_imm_hint_pred(code, qp, f1, r3, imm, hint) ia64_m8 ((code), (qp), (f1), (r3), (imm), (hint), 0x20)
+
+#define ia64_ldfs_c_nc_inc_imm_hint_pred(code, qp, f1, r3, imm, hint) ia64_m8 ((code), (qp), (f1), (r3), (imm), (hint), 0x26)
+#define ia64_ldfd_c_nc_inc_imm_hint_pred(code, qp, f1, r3, imm, hint) ia64_m8 ((code), (qp), (f1), (r3), (imm), (hint), 0x27)
+#define ia64_ldf8_c_nc_inc_imm_hint_pred(code, qp, f1, r3, imm, hint) ia64_m8 ((code), (qp), (f1), (r3), (imm), (hint), 0x25)
+#define ia64_ldfe_c_nc_inc_imm_hint_pred(code, qp, f1, r3, imm, hint) ia64_m8 ((code), (qp), (f1), (r3), (imm), (hint), 0x24)
+
+#define ia64_ldf_fill_inc_imm_hint_pred(code, qp, f1, r3, imm, hint) ia64_m8 ((code), (qp), (f1), (r3), (imm), (hint), 0x1B)
+
+/* Pseudo ops */
+
+#define ia64_ldfs_pred(code, qp, f1, r3) ia64_ldfs_hint_pred (code, qp, f1, r3, 0)
+#define ia64_ldfd_pred(code, qp, f1, r3) ia64_ldfd_hint_pred (code, qp, f1, r3, 0)
+#define ia64_ldf8_pred(code, qp, f1, r3) ia64_ldf8_hint_pred (code, qp, f1, r3, 0)
+#define ia64_ldfe_pred(code, qp, f1, r3) ia64_ldfe_hint_pred (code, qp, f1, r3, 0)
+
+#define ia64_ldfs_s_pred(code, qp, f1, r3) ia64_ldfs_s_hint_pred (code, qp, f1, r3, 0)
+#define ia64_ldfd_s_pred(code, qp, f1, r3) ia64_ldfd_s_hint_pred (code, qp, f1, r3, 0)
+#define ia64_ldf8_s_pred(code, qp, f1, r3) ia64_ldf8_s_hint_pred (code, qp, f1, r3, 0)
+#define ia64_ldfe_s_pred(code, qp, f1, r3) ia64_ldfe_s_hint_pred (code, qp, f1, r3, 0)
+
+#define ia64_ldfs_a_pred(code, qp, f1, r3) ia64_ldfs_a_hint_pred (code, qp, f1, r3, 0)
+#define ia64_ldfd_a_pred(code, qp, f1, r3) ia64_ldfd_a_hint_pred (code, qp, f1, r3, 0)
+#define ia64_ldf8_a_pred(code, qp, f1, r3) ia64_ldf8_a_hint_pred (code, qp, f1, r3, 0)
+#define ia64_ldfe_a_pred(code, qp, f1, r3) ia64_ldfe_a_hint_pred (code, qp, f1, r3, 0)
+
+#define ia64_ldfs_sa_pred(code, qp, f1, r3) ia64_ldfs_sa_hint_pred (code, qp, f1, r3, 0)
+#define ia64_ldfd_sa_pred(code, qp, f1, r3) ia64_ldfd_sa_hint_pred (code, qp, f1, r3, 0)
+#define ia64_ldf8_sa_pred(code, qp, f1, r3) ia64_ldf8_sa_hint_pred (code, qp, f1, r3, 0)
+#define ia64_ldfe_sa_pred(code, qp, f1, r3) ia64_ldfe_sa_hint_pred (code, qp, f1, r3, 0)
+
+#define ia64_ldfs_c_clr_pred(code, qp, f1, r3) ia64_ldfs_c_clr_hint_pred (code, qp, f1, r3, 0)
+#define ia64_ldfd_c_clr_pred(code, qp, f1, r3) ia64_ldfd_c_clr_hint_pred (code, qp, f1, r3, 0)
+#define ia64_ldf8_c_clr_pred(code, qp, f1, r3) ia64_ldf8_c_clr_hint_pred (code, qp, f1, r3, 0)
+#define ia64_ldfe_c_clr_pred(code, qp, f1, r3) ia64_ldfe_c_clr_hint_pred (code, qp, f1, r3, 0)
+
+#define ia64_ldfs_c_nc_pred(code, qp, f1, r3) ia64_ldfs_c_nc_hint_pred (code, qp, f1, r3, 0)
+#define ia64_ldfd_c_nc_pred(code, qp, f1, r3) ia64_ldfd_c_nc_hint_pred (code, qp, f1, r3, 0)
+#define ia64_ldf8_c_nc_pred(code, qp, f1, r3) ia64_ldf8_c_nc_hint_pred (code, qp, f1, r3, 0)
+#define ia64_ldfe_c_nc_pred(code, qp, f1, r3) ia64_ldfe_c_nc_hint_pred (code, qp, f1, r3, 0)
+
+#define ia64_ldf_fill_pred(code, qp, f1, r3) ia64_ldf_fill_hint_pred (code, qp, f1, r3, 0)
+
+#define ia64_ldfs_inc_pred(code, qp, f1, r3, r2) ia64_ldfs_inc_hint_pred (code, qp, f1, r3, r2, 0)
+#define ia64_ldfd_inc_pred(code, qp, f1, r3, r2) ia64_ldfd_inc_hint_pred (code, qp, f1, r3, r2, 0)
+#define ia64_ldf8_inc_pred(code, qp, f1, r3, r2) ia64_ldf8_inc_hint_pred (code, qp, f1, r3, r2, 0)
+#define ia64_ldfe_inc_pred(code, qp, f1, r3, r2) ia64_ldfe_inc_hint_pred (code, qp, f1, r3, r2, 0)
+
+#define ia64_ldfs_s_inc_pred(code, qp, f1, r3, r2) ia64_ldfs_s_inc_hint_pred (code, qp, f1, r3, r2, 0)
+#define ia64_ldfd_s_inc_pred(code, qp, f1, r3, r2) ia64_ldfd_s_inc_hint_pred (code, qp, f1, r3, r2, 0)
+#define ia64_ldf8_s_inc_pred(code, qp, f1, r3, r2) ia64_ldf8_s_inc_hint_pred (code, qp, f1, r3, r2, 0)
+#define ia64_ldfe_s_inc_pred(code, qp, f1, r3, r2) ia64_ldfe_s_inc_hint_pred (code, qp, f1, r3, r2, 0)
+
+#define ia64_ldfs_a_inc_pred(code, qp, f1, r3, r2) ia64_ldfs_a_inc_hint_pred (code, qp, f1, r3, r2, 0)
+#define ia64_ldfd_a_inc_pred(code, qp, f1, r3, r2) ia64_ldfd_a_inc_hint_pred (code, qp, f1, r3, r2, 0)
+#define ia64_ldf8_a_inc_pred(code, qp, f1, r3, r2) ia64_ldf8_a_inc_hint_pred (code, qp, f1, r3, r2, 0)
+#define ia64_ldfe_a_inc_pred(code, qp, f1, r3, r2) ia64_ldfe_a_inc_hint_pred (code, qp, f1, r3, r2, 0)
+
+#define ia64_ldfs_sa_inc_pred(code, qp, f1, r3, r2) ia64_ldfs_sa_inc_hint_pred (code, qp, f1, r3, r2, 0)
+#define ia64_ldfd_sa_inc_pred(code, qp, f1, r3, r2) ia64_ldfd_sa_inc_hint_pred (code, qp, f1, r3, r2, 0)
+#define ia64_ldf8_sa_inc_pred(code, qp, f1, r3, r2) ia64_ldf8_sa_inc_hint_pred (code, qp, f1, r3, r2, 0)
+#define ia64_ldfe_sa_inc_pred(code, qp, f1, r3, r2) ia64_ldfe_sa_inc_hint_pred (code, qp, f1, r3, r2, 0)
+
+#define ia64_ldfs_c_clr_inc_pred(code, qp, f1, r3, r2) ia64_ldfs_c_clr_inc_hint_pred (code, qp, f1, r3, r2, 0)
+#define ia64_ldfd_c_clr_inc_pred(code, qp, f1, r3, r2) ia64_ldfd_c_clr_inc_hint_pred (code, qp, f1, r3, r2, 0)
+#define ia64_ldf8_c_clr_inc_pred(code, qp, f1, r3, r2) ia64_ldf8_c_clr_inc_hint_pred (code, qp, f1, r3, r2, 0)
+#define ia64_ldfe_c_clr_inc_pred(code, qp, f1, r3, r2) ia64_ldfe_c_clr_inc_hint_pred (code, qp, f1, r3, r2, 0)
+
+#define ia64_ldfs_c_nc_inc_pred(code, qp, f1, r3, r2) ia64_ldfs_c_nc_inc_hint_pred (code, qp, f1, r3, r2, 0)
+#define ia64_ldfd_c_nc_inc_pred(code, qp, f1, r3, r2) ia64_ldfd_c_nc_inc_hint_pred (code, qp, f1, r3, r2, 0)
+#define ia64_ldf8_c_nc_inc_pred(code, qp, f1, r3, r2) ia64_ldf8_c_nc_inc_hint_pred (code, qp, f1, r3, r2, 0)
+#define ia64_ldfe_c_nc_inc_pred(code, qp, f1, r3, r2) ia64_ldfe_c_nc_inc_hint_pred (code, qp, f1, r3, r2, 0)
+
+#define ia64_ldf_fill_inc_pred(code, qp, f1, r3, r2) ia64_ldf_fill_inc_hint_pred (code, qp, f1, r3, r2, 0)
+
+#define ia64_ldfs_inc_imm_pred(code, qp, f1, r3, imm) ia64_ldfs_inc_imm_hint_pred (code, qp, f1, r3, imm, 0)
+#define ia64_ldfd_inc_imm_pred(code, qp, f1, r3, imm) ia64_ldfd_inc_imm_hint_pred (code, qp, f1, r3, imm, 0)
+#define ia64_ldf8_inc_imm_pred(code, qp, f1, r3, imm) ia64_ldf8_inc_imm_hint_pred (code, qp, f1, r3, imm, 0)
+#define ia64_ldfe_inc_imm_pred(code, qp, f1, r3, imm) ia64_ldfe_inc_imm_hint_pred (code, qp, f1, r3, imm, 0)
+
+#define ia64_ldfs_s_inc_imm_pred(code, qp, f1, r3, imm) ia64_ldfs_s_inc_imm_hint_pred (code, qp, f1, r3, imm, 0)
+#define ia64_ldfd_s_inc_imm_pred(code, qp, f1, r3, imm) ia64_ldfd_s_inc_imm_hint_pred (code, qp, f1, r3, imm, 0)
+#define ia64_ldf8_s_inc_imm_pred(code, qp, f1, r3, imm) ia64_ldf8_s_inc_imm_hint_pred (code, qp, f1, r3, imm, 0)
+#define ia64_ldfe_s_inc_imm_pred(code, qp, f1, r3, imm) ia64_ldfe_s_inc_imm_hint_pred (code, qp, f1, r3, imm, 0)
+
+#define ia64_ldfs_a_inc_imm_pred(code, qp, f1, r3, imm) ia64_ldfs_a_inc_imm_hint_pred (code, qp, f1, r3, imm, 0)
+#define ia64_ldfd_a_inc_imm_pred(code, qp, f1, r3, imm) ia64_ldfd_a_inc_imm_hint_pred (code, qp, f1, r3, imm, 0)
+#define ia64_ldf8_a_inc_imm_pred(code, qp, f1, r3, imm) ia64_ldf8_a_inc_imm_hint_pred (code, qp, f1, r3, imm, 0)
+#define ia64_ldfe_a_inc_imm_pred(code, qp, f1, r3, imm) ia64_ldfe_a_inc_imm_hint_pred (code, qp, f1, r3, imm, 0)
+
+#define ia64_ldfs_sa_inc_imm_pred(code, qp, f1, r3, imm) ia64_ldfs_sa_inc_imm_hint_pred (code, qp, f1, r3, imm, 0)
+#define ia64_ldfd_sa_inc_imm_pred(code, qp, f1, r3, imm) ia64_ldfd_sa_inc_imm_hint_pred (code, qp, f1, r3, imm, 0)
+#define ia64_ldf8_sa_inc_imm_pred(code, qp, f1, r3, imm) ia64_ldf8_sa_inc_imm_hint_pred (code, qp, f1, r3, imm, 0)
+#define ia64_ldfe_sa_inc_imm_pred(code, qp, f1, r3, imm) ia64_ldfe_sa_inc_imm_hint_pred (code, qp, f1, r3, imm, 0)
+
+#define ia64_ldfs_c_clr_inc_imm_pred(code, qp, f1, r3, imm) ia64_ldfs_c_clr_inc_imm_hint_pred (code, qp, f1, r3, imm, 0)
+#define ia64_ldfd_c_clr_inc_imm_pred(code, qp, f1, r3, imm) ia64_ldfd_c_clr_inc_imm_hint_pred (code, qp, f1, r3, imm, 0)
+#define ia64_ldf8_c_clr_inc_imm_pred(code, qp, f1, r3, imm) ia64_ldf8_c_clr_inc_imm_hint_pred (code, qp, f1, r3, imm, 0)
+#define ia64_ldfe_c_clr_inc_imm_pred(code, qp, f1, r3, imm) ia64_ldfe_c_clr_inc_imm_hint_pred (code, qp, f1, r3, imm, 0)
+
+#define ia64_ldfs_c_nc_inc_imm_pred(code, qp, f1, r3, imm) ia64_ldfs_c_nc_inc_imm_hint_pred (code, qp, f1, r3, imm, 0)
+#define ia64_ldfd_c_nc_inc_imm_pred(code, qp, f1, r3, imm) ia64_ldfd_c_nc_inc_imm_hint_pred (code, qp, f1, r3, imm, 0)
+#define ia64_ldf8_c_nc_inc_imm_pred(code, qp, f1, r3, imm) ia64_ldf8_c_nc_inc_imm_hint_pred (code, qp, f1, r3, imm, 0)
+#define ia64_ldfe_c_nc_inc_imm_pred(code, qp, f1, r3, imm) ia64_ldfe_c_nc_inc_imm_hint_pred (code, qp, f1, r3, imm, 0)
+
+#define ia64_ldf_fill_inc_imm_pred(code, qp, f1, r3, imm) ia64_ldf_fill_inc_imm_hint_pred (code, qp, f1, r3, imm, 0)
+
+/* End of pseudo ops */
+
+#define ia64_m9(code, qp, r3, f2, hint, m, x, x6) do { read_pr ((code), (qp)); read_gr ((code), (r3)); read_fr ((code), (f2)); ia64_emit_ins_8 ((code), IA64_INS_TYPE_M, (qp), 0, (f2), 13, (r3), 20, (x), 27, (hint), 28, (x6), 30, (m), 36, (6), 37); } while (0)
+
+#define ia64_stfs_hint_pred(code, qp, r3, f2, hint) ia64_m9 ((code), (qp), (r3), (f2), (hint), 0, 0, 0x32)
+#define ia64_stfd_hint_pred(code, qp, r3, f2, hint) ia64_m9 ((code), (qp), (r3), (f2), (hint), 0, 0, 0x33)
+#define ia64_stf8_hint_pred(code, qp, r3, f2, hint) ia64_m9 ((code), (qp), (r3), (f2), (hint), 0, 0, 0x31)
+#define ia64_stfe_hint_pred(code, qp, r3, f2, hint) ia64_m9 ((code), (qp), (r3), (f2), (hint), 0, 0, 0x30)
+#define ia64_stf_spill_hint_pred(code, qp, r3, f2, hint) ia64_m9 ((code), (qp), (r3), (f2), (hint), 0, 0, 0x3B)
+
+#define ia64_m10(code, qp, r3, f2, imm, hint, x6) do { read_pr ((code), (qp)); read_gr ((code), (r3)); write_gr ((code), (r3)); read_fr ((code), (f2)); check_imm9 ((imm)); ia64_emit_ins_9 ((code), IA64_INS_TYPE_M, (qp), 0, (imm) & 0x7f, 6, (f2), 13, (r3), 20, ((imm) >> 7) & 0x1, 27, (hint), 28, (x6), 30, sign_bit ((imm)), 36, (7), 37); } while (0)
+
+#define ia64_stfs_inc_imm_hint_pred(code, qp, r3, f2, imm, hint) ia64_m10 ((code), (qp), (r3), (f2), (imm), (hint), 0x32)
+#define ia64_stfd_inc_imm_hint_pred(code, qp, r3, f2, imm, hint) ia64_m10 ((code), (qp), (r3), (f2), (imm), (hint), 0x33)
+#define ia64_stf8_inc_imm_hint_pred(code, qp, r3, f2, imm, hint) ia64_m10 ((code), (qp), (r3), (f2), (imm), (hint), 0x31)
+#define ia64_stfe_inc_imm_hint_pred(code, qp, r3, f2, imm, hint) ia64_m10 ((code), (qp), (r3), (f2), (imm), (hint), 0x30)
+#define ia64_stf_spill_inc_imm_hint_pred(code, qp, r3, f2, imm, hint) ia64_m10 ((code), (qp), (r3), (f2), (imm), (hint), 0x3B)
+
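+/*
+ * Sketch (illustrative): stf.spill / ldf.fill move the raw 82-bit register
+ * image (including NaTVal) and are the matching pair for saving and
+ * restoring FP registers, e.g. around a call.
+ */
+#if 0
+static void
+emit_fp_save_restore (Ia64CodegenState code, int spill_ptr_reg)
+{
+	/* stf.spill [spill_ptr_reg] = f16 */
+	ia64_stf_spill_hint_pred (code, 0, spill_ptr_reg, 16, 0);
+	/* ... clobbering code ... */
+	/* ldf.fill f16 = [spill_ptr_reg]  --  exact inverse of stf.spill */
+	ia64_ldf_fill_pred (code, 0, 16, spill_ptr_reg);
+}
+#endif
+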
+#define ia64_m11(code, qp, f1, f2, r3, hint, m, x, x6) do { read_pr ((code), (qp)); write_fr ((code), (f1)); write_fr ((code), (f2)); read_gr ((code), (r3)); ia64_emit_ins_9 ((code), IA64_INS_TYPE_M, (qp), 0, (f1), 6, (f2), 13, (r3), 20, (x), 27, (hint), 28, (x6), 30, (m), 36, (6), 37); } while (0)
+
+#define ia64_ldfps_hint_pred(code, qp, f1, f2, r3, hint) ia64_m11 ((code), (qp), (f1), (f2), (r3), (hint), 0, 1, 0x02)
+#define ia64_ldfpd_hint_pred(code, qp, f1, f2, r3, hint) ia64_m11 ((code), (qp), (f1), (f2), (r3), (hint), 0, 1, 0x03)
+#define ia64_ldfp8_hint_pred(code, qp, f1, f2, r3, hint) ia64_m11 ((code), (qp), (f1), (f2), (r3), (hint), 0, 1, 0x01)
+
+#define ia64_ldfps_s_hint_pred(code, qp, f1, f2, r3, hint) ia64_m11 ((code), (qp), (f1), (f2), (r3), (hint), 0, 1, 0x06)
+#define ia64_ldfpd_s_hint_pred(code, qp, f1, f2, r3, hint) ia64_m11 ((code), (qp), (f1), (f2), (r3), (hint), 0, 1, 0x07)
+#define ia64_ldfp8_s_hint_pred(code, qp, f1, f2, r3, hint) ia64_m11 ((code), (qp), (f1), (f2), (r3), (hint), 0, 1, 0x05)
+
+#define ia64_ldfps_a_hint_pred(code, qp, f1, f2, r3, hint) ia64_m11 ((code), (qp), (f1), (f2), (r3), (hint), 0, 1, 0x0A)
+#define ia64_ldfpd_a_hint_pred(code, qp, f1, f2, r3, hint) ia64_m11 ((code), (qp), (f1), (f2), (r3), (hint), 0, 1, 0x0B)
+#define ia64_ldfp8_a_hint_pred(code, qp, f1, f2, r3, hint) ia64_m11 ((code), (qp), (f1), (f2), (r3), (hint), 0, 1, 0x09)
+
+#define ia64_ldfps_sa_hint_pred(code, qp, f1, f2, r3, hint) ia64_m11 ((code), (qp), (f1), (f2), (r3), (hint), 0, 1, 0x0E)
+#define ia64_ldfpd_sa_hint_pred(code, qp, f1, f2, r3, hint) ia64_m11 ((code), (qp), (f1), (f2), (r3), (hint), 0, 1, 0x0F)
+#define ia64_ldfp8_sa_hint_pred(code, qp, f1, f2, r3, hint) ia64_m11 ((code), (qp), (f1), (f2), (r3), (hint), 0, 1, 0x0D)
+
+#define ia64_ldfps_c_clr_hint_pred(code, qp, f1, f2, r3, hint) ia64_m11 ((code), (qp), (f1), (f2), (r3), (hint), 0, 1, 0x22)
+#define ia64_ldfpd_c_clr_hint_pred(code, qp, f1, f2, r3, hint) ia64_m11 ((code), (qp), (f1), (f2), (r3), (hint), 0, 1, 0x23)
+#define ia64_ldfp8_c_clr_hint_pred(code, qp, f1, f2, r3, hint) ia64_m11 ((code), (qp), (f1), (f2), (r3), (hint), 0, 1, 0x21)
+
+#define ia64_ldfps_c_nc_hint_pred(code, qp, f1, f2, r3, hint) ia64_m11 ((code), (qp), (f1), (f2), (r3), (hint), 0, 1, 0x26)
+#define ia64_ldfpd_c_nc_hint_pred(code, qp, f1, f2, r3, hint) ia64_m11 ((code), (qp), (f1), (f2), (r3), (hint), 0, 1, 0x27)
+#define ia64_ldfp8_c_nc_hint_pred(code, qp, f1, f2, r3, hint) ia64_m11 ((code), (qp), (f1), (f2), (r3), (hint), 0, 1, 0x25)
+
+#define ia64_m12(code, qp, f1, f2, r3, hint, m, x, x6) do { read_pr ((code), (qp)); write_fr ((code), (f1)); write_fr ((code), (f2)); read_gr ((code), (r3)); write_gr ((code), (r3)); ia64_emit_ins_9 ((code), IA64_INS_TYPE_M, (qp), 0, (f1), 6, (f2), 13, (r3), 20, (x), 27, (hint), 28, (x6), 30, (m), 36, (6), 37); } while (0)
+
+#define ia64_ldfps_inc_hint_pred(code, qp, f1, f2, r3, hint) ia64_m12 ((code), (qp), (f1), (f2), (r3), (hint), 1, 1, 0x02)
+#define ia64_ldfpd_inc_hint_pred(code, qp, f1, f2, r3, hint) ia64_m12 ((code), (qp), (f1), (f2), (r3), (hint), 1, 1, 0x03)
+#define ia64_ldfp8_inc_hint_pred(code, qp, f1, f2, r3, hint) ia64_m12 ((code), (qp), (f1), (f2), (r3), (hint), 1, 1, 0x01)
+
+#define ia64_ldfps_s_inc_hint_pred(code, qp, f1, f2, r3, hint) ia64_m12 ((code), (qp), (f1), (f2), (r3), (hint), 1, 1, 0x06)
+#define ia64_ldfpd_s_inc_hint_pred(code, qp, f1, f2, r3, hint) ia64_m12 ((code), (qp), (f1), (f2), (r3), (hint), 1, 1, 0x07)
+#define ia64_ldfp8_s_inc_hint_pred(code, qp, f1, f2, r3, hint) ia64_m12 ((code), (qp), (f1), (f2), (r3), (hint), 1, 1, 0x05)
+
+#define ia64_ldfps_a_inc_hint_pred(code, qp, f1, f2, r3, hint) ia64_m12 ((code), (qp), (f1), (f2), (r3), (hint), 1, 1, 0x0A)
+#define ia64_ldfpd_a_inc_hint_pred(code, qp, f1, f2, r3, hint) ia64_m12 ((code), (qp), (f1), (f2), (r3), (hint), 1, 1, 0x0B)
+#define ia64_ldfp8_a_inc_hint_pred(code, qp, f1, f2, r3, hint) ia64_m12 ((code), (qp), (f1), (f2), (r3), (hint), 1, 1, 0x09)
+
+#define ia64_ldfps_sa_inc_hint_pred(code, qp, f1, f2, r3, hint) ia64_m12 ((code), (qp), (f1), (f2), (r3), (hint), 1, 1, 0x0E)
+#define ia64_ldfpd_sa_inc_hint_pred(code, qp, f1, f2, r3, hint) ia64_m12 ((code), (qp), (f1), (f2), (r3), (hint), 1, 1, 0x0F)
+#define ia64_ldfp8_sa_inc_hint_pred(code, qp, f1, f2, r3, hint) ia64_m12 ((code), (qp), (f1), (f2), (r3), (hint), 1, 1, 0x0D)
+
+#define ia64_ldfps_c_clr_inc_hint_pred(code, qp, f1, f2, r3, hint) ia64_m12 ((code), (qp), (f1), (f2), (r3), (hint), 1, 1, 0x22)
+#define ia64_ldfpd_c_clr_inc_hint_pred(code, qp, f1, f2, r3, hint) ia64_m12 ((code), (qp), (f1), (f2), (r3), (hint), 1, 1, 0x23)
+#define ia64_ldfp8_c_clr_inc_hint_pred(code, qp, f1, f2, r3, hint) ia64_m12 ((code), (qp), (f1), (f2), (r3), (hint), 1, 1, 0x21)
+
+#define ia64_ldfps_c_nc_inc_hint_pred(code, qp, f1, f2, r3, hint) ia64_m12 ((code), (qp), (f1), (f2), (r3), (hint), 1, 1, 0x26)
+#define ia64_ldfpd_c_nc_inc_hint_pred(code, qp, f1, f2, r3, hint) ia64_m12 ((code), (qp), (f1), (f2), (r3), (hint), 1, 1, 0x27)
+#define ia64_ldfp8_c_nc_inc_hint_pred(code, qp, f1, f2, r3, hint) ia64_m12 ((code), (qp), (f1), (f2), (r3), (hint), 1, 1, 0x25)
+
+typedef enum {
+ IA64_LFHINT_NONE = 0,
+ IA64_LFHINT_NT1 = 1,
+ IA64_LFHINT_NT2 = 2,
+ IA64_LFHINT_NTA = 3
+} Ia64LinePrefetchHint;
+
+#define ia64_m13(code, qp, r3, hint, m, x, x6) do { read_pr ((code), (qp)); read_gr ((code), (r3)); ia64_emit_ins_7 ((code), IA64_INS_TYPE_M, (qp), 0, (r3), 20, (x), 27, (hint), 28, (x6), 30, (m), 36, (6), 37); } while (0)
+
+#define ia64_lfetch_hint_pred(code, qp, r3, hint) ia64_m13 ((code), (qp), (r3), (hint), 0, 0, 0x2C)
+#define ia64_lfetch_excl_hint_pred(code, qp, r3, hint) ia64_m13 ((code), (qp), (r3), (hint), 0, 0, 0x2D)
+#define ia64_lfetch_fault_hint_pred(code, qp, r3, hint) ia64_m13 ((code), (qp), (r3), (hint), 0, 0, 0x2E)
+#define ia64_lfetch_fault_excl_hint_pred(code, qp, r3, hint) ia64_m13 ((code), (qp), (r3), (hint), 0, 0, 0x2F)
+
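+/*
+ * Sketch (illustrative): lfetch takes one of the Ia64LinePrefetchHint
+ * values above; .nta marks the line non-temporal in all cache levels.
+ */
+#if 0
+static void
+emit_prefetch (Ia64CodegenState code, int addr_reg)
+{
+	/* lfetch.nta [addr_reg] */
+	ia64_lfetch_hint_pred (code, 0, addr_reg, IA64_LFHINT_NTA);
+}
+#endif
+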
+#define ia64_m14(code, qp, r3, r2, hint, m, x, x6) do { read_pr ((code), (qp)); read_gr ((code), (r3)); write_gr ((code), (r3)); read_gr ((code), (r2)); ia64_emit_ins_8 ((code), IA64_INS_TYPE_M, (qp), 0, (r2), 13, (r3), 20, (x), 27, (hint), 28, (x6), 30, (m), 36, (6), 37); } while (0)
+
+#define ia64_lfetch_inc_hint_pred(code, qp, r3, r2, hint) ia64_m14 ((code), (qp), (r3), (r2), (hint), 1, 0, 0x2C)
+#define ia64_lfetch_excl_inc_hint_pred(code, qp, r3, r2, hint) ia64_m14 ((code), (qp), (r3), (r2), (hint), 1, 0, 0x2D)
+#define ia64_lfetch_fault_inc_hint_pred(code, qp, r3, r2, hint) ia64_m14 ((code), (qp), (r3), (r2), (hint), 1, 0, 0x2E)
+#define ia64_lfetch_fault_excl_inc_hint_pred(code, qp, r3, r2, hint) ia64_m14 ((code), (qp), (r3), (r2), (hint), 1, 0, 0x2F)
+
+#define ia64_m15(code, qp, r3, imm, hint, x6) do { read_pr ((code), (qp)); read_gr ((code), (r3)); write_gr ((code), (r3)); check_imm9 ((imm)); ia64_emit_ins_8 ((code), IA64_INS_TYPE_M, (qp), 0, (imm) & 0x7f, 13, (r3), 20, ((imm) >> 7) & 0x1, 27, (hint), 28, (x6), 30, sign_bit ((imm)), 36, (7), 37); } while (0)
+
+#define ia64_lfetch_inc_imm_hint_pred(code, qp, r3, imm, hint) ia64_m15 ((code), (qp), (r3), (imm), (hint), 0x2C)
+#define ia64_lfetch_excl_inc_imm_hint_pred(code, qp, r3, imm, hint) ia64_m15 ((code), (qp), (r3), (imm), (hint), 0x2D)
+#define ia64_lfetch_fault_inc_imm_hint_pred(code, qp, r3, imm, hint) ia64_m15 ((code), (qp), (r3), (imm), (hint), 0x2E)
+#define ia64_lfetch_fault_excl_inc_imm_hint_pred(code, qp, r3, imm, hint) ia64_m15 ((code), (qp), (r3), (imm), (hint), 0x2F)
+
+#define ia64_m16(code, qp, r1, r3, r2, hint, m, x, x6) do { read_pr ((code), (qp)); write_gr ((code), (r1)); read_gr ((code), (r3)); read_gr ((code), (r2)); ia64_emit_ins_9 ((code), IA64_INS_TYPE_M, (qp), 0, (r1), 6, (r2), 13, (r3), 20, (x), 27, (hint), 28, (x6), 30, (m), 36, (4), 37); } while (0)
+
+#define ia64_cmpxchg1_acq_hint_pred(code, qp, r1, r3, r2, hint) ia64_m16 ((code), (qp), (r1), (r3), (r2), (hint), 0, 1, 0x00)
+#define ia64_cmpxchg2_acq_hint_pred(code, qp, r1, r3, r2, hint) ia64_m16 ((code), (qp), (r1), (r3), (r2), (hint), 0, 1, 0x01)
+#define ia64_cmpxchg4_acq_hint_pred(code, qp, r1, r3, r2, hint) ia64_m16 ((code), (qp), (r1), (r3), (r2), (hint), 0, 1, 0x02)
+#define ia64_cmpxchg8_acq_hint_pred(code, qp, r1, r3, r2, hint) ia64_m16 ((code), (qp), (r1), (r3), (r2), (hint), 0, 1, 0x03)
+#define ia64_cmpxchg1_rel_hint_pred(code, qp, r1, r3, r2, hint) ia64_m16 ((code), (qp), (r1), (r3), (r2), (hint), 0, 1, 0x04)
+#define ia64_cmpxchg2_rel_hint_pred(code, qp, r1, r3, r2, hint) ia64_m16 ((code), (qp), (r1), (r3), (r2), (hint), 0, 1, 0x05)
+#define ia64_cmpxchg4_rel_hint_pred(code, qp, r1, r3, r2, hint) ia64_m16 ((code), (qp), (r1), (r3), (r2), (hint), 0, 1, 0x06)
+#define ia64_cmpxchg8_rel_hint_pred(code, qp, r1, r3, r2, hint) ia64_m16 ((code), (qp), (r1), (r3), (r2), (hint), 0, 1, 0x07)
+#define ia64_cmpxchg16_acq_hint_pred(code, qp, r1, r3, r2, hint) ia64_m16 ((code), (qp), (r1), (r3), (r2), (hint), 0, 1, 0x20)
+#define ia64_cmpxchg16_rel_hint_pred(code, qp, r1, r3, r2, hint) ia64_m16 ((code), (qp), (r1), (r3), (r2), (hint), 0, 1, 0x24)
+#define ia64_xchg1_hint_pred(code, qp, r1, r3, r2, hint) ia64_m16 ((code), (qp), (r1), (r3), (r2), (hint), 0, 1, 0x08)
+#define ia64_xchg2_hint_pred(code, qp, r1, r3, r2, hint) ia64_m16 ((code), (qp), (r1), (r3), (r2), (hint), 0, 1, 0x09)
+#define ia64_xchg4_hint_pred(code, qp, r1, r3, r2, hint) ia64_m16 ((code), (qp), (r1), (r3), (r2), (hint), 0, 1, 0x0A)
+#define ia64_xchg8_hint_pred(code, qp, r1, r3, r2, hint) ia64_m16 ((code), (qp), (r1), (r3), (r2), (hint), 0, 1, 0x0B)
+
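+/*
+ * Sketch (illustrative): cmpxchg compares against ar.ccv (application
+ * register 32), so a compare-and-swap loads the comparand there first.
+ * ia64_mov_to_ar_m_pred is defined further down in this header.
+ */
+#if 0
+static void
+emit_cas (Ia64CodegenState code, int addr_reg, int old_reg, int new_reg)
+{
+	/* mov.m ar.ccv = old_reg */
+	ia64_mov_to_ar_m_pred (code, 0, 32, old_reg);
+	/* cmpxchg8.acq r8 = [addr_reg], new_reg  --  r8 receives the old memory value */
+	ia64_cmpxchg8_acq_hint_pred (code, 0, 8, addr_reg, new_reg, 0);
+}
+#endif
+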
+#define encode_inc3(inc3) ((inc3) == 16 ? 0 : ((inc3) == 8 ? 1 : ((inc3) == 4 ? 2 : 3)))
+
+#define ia64_m17(code, qp, r1, r3, imm, hint, m, x, x6) do { int aimm; read_pr ((code), (qp)); write_gr ((code), (r1)); read_gr ((code), (r3)); aimm = (imm) < 0 ? - (imm) : (imm); check_assert ((aimm) == 16 || (aimm) == 8 || (aimm) == 4 || (aimm) == 1); ia64_emit_ins_10 ((code), IA64_INS_TYPE_M, (qp), 0, (r1), 6, encode_inc3 (aimm), 13, sign_bit ((imm)), 15, (r3), 20, (x), 27, (hint), 28, (x6), 30, (m), 36, (4), 37); } while (0)
+
+#define ia64_fetchadd4_acq_hint_pred(code, qp, r1, r3, inc, hint) ia64_m17 ((code), (qp), (r1), (r3), (inc), (hint), 0, 1, 0x12)
+#define ia64_fetchadd8_acq_hint_pred(code, qp, r1, r3, inc, hint) ia64_m17 ((code), (qp), (r1), (r3), (inc), (hint), 0, 1, 0x13)
+#define ia64_fetchadd4_rel_hint_pred(code, qp, r1, r3, inc, hint) ia64_m17 ((code), (qp), (r1), (r3), (inc), (hint), 0, 1, 0x16)
+#define ia64_fetchadd8_rel_hint_pred(code, qp, r1, r3, inc, hint) ia64_m17 ((code), (qp), (r1), (r3), (inc), (hint), 0, 1, 0x17)
+
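+/*
+ * Sketch (illustrative): fetchadd only accepts increments of +/-16, 8, 4
+ * or 1, which ia64_m17 asserts; the old memory value lands in r1.
+ */
+#if 0
+static void
+emit_atomic_inc (Ia64CodegenState code, int counter_ptr_reg)
+{
+	/* fetchadd4.acq r8 = [counter_ptr_reg], 1 */
+	ia64_fetchadd4_acq_hint_pred (code, 0, 8, counter_ptr_reg, 1, 0);
+}
+#endif
+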
+#define ia64_m18(code, qp, f1, r2, m, x, x6) do { read_pr ((code), (qp)); read_gr ((code), (r2)); write_fr ((code), (f1)); ia64_emit_ins_7 ((code), IA64_INS_TYPE_M, (qp), 0, (f1), 6, (r2), 13, (x), 27, (x6), 30, (m), 36, (6), 37); } while (0)
+
+#define ia64_setf_sig_pred(code, qp, f1, r2) ia64_m18 ((code), (qp), (f1), (r2), 0, 1, 0x1C)
+#define ia64_setf_exp_pred(code, qp, f1, r2) ia64_m18 ((code), (qp), (f1), (r2), 0, 1, 0x1D)
+#define ia64_setf_s_pred(code, qp, f1, r2) ia64_m18 ((code), (qp), (f1), (r2), 0, 1, 0x1E)
+#define ia64_setf_d_pred(code, qp, f1, r2) ia64_m18 ((code), (qp), (f1), (r2), 0, 1, 0x1F)
+
+#define ia64_m19(code, qp, r1, f2, m, x, x6) do { read_pr ((code), (qp)); write_gr ((code), (r1)); read_fr ((code), (f2)); ia64_emit_ins_7 ((code), IA64_INS_TYPE_M, (qp), 0, (r1), 6, (f2), 13, (x), 27, (x6), 30, (m), 36, (4), 37); } while (0)
+
+#define ia64_getf_sig_pred(code, qp, r1, f2) ia64_m19 ((code), (qp), (r1), (f2), 0, 1, 0x1C)
+#define ia64_getf_exp_pred(code, qp, r1, f2) ia64_m19 ((code), (qp), (r1), (f2), 0, 1, 0x1D)
+#define ia64_getf_s_pred(code, qp, r1, f2) ia64_m19 ((code), (qp), (r1), (f2), 0, 1, 0x1E)
+#define ia64_getf_d_pred(code, qp, r1, f2) ia64_m19 ((code), (qp), (r1), (f2), 0, 1, 0x1F)
+
+#define ia64_m20(code, qp, r2, imm, x3) do { read_pr ((code), (qp)); read_gr ((code), (r2)); check_imm21 ((imm)); ia64_emit_ins_7 ((code), IA64_INS_TYPE_M, (qp), 0, (imm) & 0x7f, 6, (r2), 13, ((imm) >> 7) & 0x1fff, 20, (x3), 33, sign_bit ((imm)), 36, (1), 37); } while (0)
+
+#define ia64_chk_s_m_pred(code, qp, r2, disp) ia64_m20 ((code), (qp), (r2), (disp), 1)
+
+#define ia64_m21(code, qp, f2, imm, x3) do { read_pr ((code), (qp)); read_fr ((code), (f2)); check_imm21 ((imm)); ia64_emit_ins_7 ((code), IA64_INS_TYPE_M, (qp), 0, (imm) & 0x7f, 6, (f2), 13, ((imm) >> 7) & 0x1fff, 20, (x3), 33, sign_bit ((imm)), 36, (1), 37); } while (0)
+
+#define ia64_chk_s_float_m_pred(code, qp, f2, disp) ia64_m21 ((code), (qp), (f2), (disp), 3)
+
+#define ia64_m22(code, qp, r1, imm, x3) do { read_pr ((code), (qp)); read_gr ((code), (r1)); check_imm21 ((imm)); ia64_emit_ins_6 ((code), IA64_INS_TYPE_M, (qp), 0, (r1), 6, (imm) & 0xfffff, 13, (x3), 33, sign_bit ((imm)), 36, (0), 37); } while (0)
+
+#define ia64_chk_a_nc_pred(code, qp, r1, disp) ia64_m22 ((code), (qp), (r1), (disp), 4)
+#define ia64_chk_a_clr_pred(code, qp, r1, disp) ia64_m22 ((code), (qp), (r1), (disp), 5)
+
+#define ia64_m23(code, qp, f1, imm, x3) do { read_pr ((code), (qp)); read_fr ((code), (f1)); check_imm21 ((imm)); ia64_emit_ins_6 ((code), IA64_INS_TYPE_M, (qp), 0, (f1), 6, (imm) & 0xfffff, 13, (x3), 33, sign_bit ((imm)), 36, (0), 37); } while (0)
+
+#define ia64_chk_a_nc_float_pred(code, qp, f1, disp) ia64_m23 ((code), (qp), (f1), (disp), 6)
+#define ia64_chk_a_clr_float_pred(code, qp, f1, disp) ia64_m23 ((code), (qp), (f1), (disp), 7)
+
+#define ia64_m24(code, qp, x3, x4, x2) do { read_pr ((code), (qp)); ia64_emit_ins_5 ((code), IA64_INS_TYPE_M, (qp), 0, (x4), 27, (x2), 31, (x3), 33, (0), 37); } while (0)
+
+#define ia64_invala_pred(code, qp) ia64_m24 ((code), (qp), 0, 0, 1)
+#define ia64_fwb_pred(code, qp) ia64_m24 ((code), (qp), 0, 0, 2)
+#define ia64_mf_pred(code, qp) ia64_m24 ((code), (qp), 0, 2, 2)
+#define ia64_mf_a_pred(code, qp) ia64_m24 ((code), (qp), 0, 3, 2)
+#define ia64_srlz_d_pred(code, qp) ia64_m24 ((code), (qp), 0, 0, 3)
+#define ia64_srlz_i_pred(code, qp) ia64_m24 ((code), (qp), 0, 1, 3)
+#define ia64_sync_i_pred(code, qp) ia64_m24 ((code), (qp), 0, 3, 3)
+
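+/*
+ * Sketch (illustrative): mf is the full memory fence; placing it between
+ * two unordered stores makes them visible in program order.
+ */
+#if 0
+static void
+emit_fenced_stores (Ia64CodegenState code)
+{
+	ia64_st4_hint_pred (code, 0, 14, 15, 0);	/* st4 [r14] = r15 */
+	ia64_mf_pred (code, 0);				/* mf */
+	ia64_st4_hint_pred (code, 0, 16, 17, 0);	/* st4 [r16] = r17 */
+}
+#endif
+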
+#define ia64_m25(code, qp, x3, x4, x2) do { read_pr ((code), (qp)); ia64_emit_ins_5 ((code), IA64_INS_TYPE_M, (qp), 0, (x4), 27, (x2), 31, (x3), 33, (0), 37); } while (0)
+
+#define ia64_flushrs_pred(code, qp) ia64_m25 ((code), (qp), 0, 0xC, 0)
+#define ia64_loadrs_pred(code, qp) ia64_m25 ((code), (qp), 0, 0xA, 0)
+
+#define ia64_m26(code, qp, r1, x3, x4, x2) do { read_pr ((code), (qp)); read_gr ((code), (r1)); ia64_emit_ins_6 ((code), IA64_INS_TYPE_M, (qp), 0, (r1), 6, (x4), 27, (x2), 31, (x3), 33, (0), 37); } while (0)
+
+#define ia64_invala_e_pred(code, qp, r1) ia64_m26 ((code), (qp), (r1), 0, 2, 1)
+
+#define ia64_m27(code, qp, f1, x3, x4, x2) do { read_pr ((code), (qp)); read_fr ((code), (f1)); ia64_emit_ins_6 ((code), IA64_INS_TYPE_M, (qp), 0, (f1), 6, (x4), 27, (x2), 31, (x3), 33, (0), 37); } while (0)
+
+#define ia64_invala_e_float_pred(code, qp, f1) ia64_m27 ((code), (qp), (f1), 0, 3, 1)
+
+#define ia64_m28(code, qp, r3, x3, x6, x) do { read_pr ((code), (qp)); read_gr ((code), (r3)); ia64_emit_ins_6 ((code), IA64_INS_TYPE_M, (qp), 0, (r3), 20, (x6), 27, (x3), 33, (x), 36, (1), 37); } while (0)
+
+#define ia64_fc_pred(code, qp, r3) ia64_m28 ((code), (qp), (r3), 0, 0x30, 0)
+#define ia64_fc_i_pred(code, qp, r3) ia64_m28 ((code), (qp), (r3), 0, 0x30, 1)
+
+#define ia64_m29(code, qp, ar3, r2, x3, x6) do { read_pr ((code), (qp)); read_gr ((code), (r2)); write_ar ((code), (ar3)); ia64_emit_ins_6 ((code), IA64_INS_TYPE_M, (qp), 0, (r2), 13, (ar3), 20, (x6), 27, (x3), 33, (1), 37); } while (0)
+
+#define ia64_mov_to_ar_m_pred(code, qp, ar3, r2) ia64_m29 ((code), (qp), (ar3), (r2), 0, 0x2a)
+
+#define ia64_m30(code, qp, ar3, imm, x3, x4, x2) do { read_pr ((code), (qp)); read_ar ((code), (ar3)); check_imm8 ((imm)); ia64_emit_ins_8 ((code), IA64_INS_TYPE_M, (qp), 0, (imm) & 0x7f, 13, (ar3), 20, (x4), 27, (x2), 31, (x3), 33, sign_bit ((imm)), 36, (0), 37); } while (0)
+
+#define ia64_mov_to_ar_imm_m_pred(code, qp, ar3, imm) ia64_m30 ((code), (qp), (ar3), (imm), 0, 8, 2)
+
+#define ia64_m31(code, qp, r1, ar3, x3, x6) do { read_pr ((code), (qp)); write_gr ((code), (r1)); read_ar ((code), (ar3)); ia64_emit_ins_6 ((code), IA64_INS_TYPE_M, (qp), 0, (r1), 6, (ar3), 20, (x6), 27, (x3), 33, (1), 37); } while (0)
+
+#define ia64_mov_from_ar_m_pred(code, qp, r1, ar3) ia64_m31 ((code), (qp), (r1), (ar3), 0, 0x22)
+
+#define ia64_m32(code, qp, cr3, r2, x3, x6) do { read_pr ((code), (qp)); read_gr ((code), (r2)); ia64_emit_ins_6 ((code), IA64_INS_TYPE_M, (qp), 0, (r2), 13, (cr3), 20, (x6), 27, (x3), 33, (1), 37); } while (0)
+
+#define ia64_mov_to_cr_pred(code, qp, cr3, r2) ia64_m32 ((code), (qp), (cr3), (r2), 0, 0x2C)
+
+#define ia64_m33(code, qp, r1, cr3, x3, x6) do { read_pr ((code), (qp)); write_gr ((code), (r1)); ia64_emit_ins_6 ((code), IA64_INS_TYPE_M, (qp), 0, (r1), 6, (cr3), 20, (x6), 27, (x3), 33, (1), 37); } while (0)
+
+#define ia64_mov_from_cr_pred(code, qp, r1, cr3) ia64_m33 ((code), (qp), (r1), (cr3), 0, 0x24)
+
+#define ia64_m34(code, qp, r1, sor, sol, sof, x3) do { ia64_begin_bundle ((code)); read_pr ((code), (qp)); write_gr ((code), (r1)); check_assert ((guint64)(sor) <= 0xf); check_assert ((guint64)(sol) <= 0x7f); check_assert ((guint64)(sof) <= 96); check_assert ((code).nins == 0); check_assert ((qp) == 0); ia64_emit_ins_7 ((code), IA64_INS_TYPE_M, (qp), 0, (r1), 6, (sof), 13, (sol), 20, (sor), 27, (x3), 33, (1), 37); } while (0)
+
+#define ia64_alloc_pred(code, qp, r1, i, l, o, r) do { read_pr ((code), (qp)); check_assert (((r) % 8) == 0); check_assert ((r) <= (i) + (l) + (o)); ia64_m34 ((code), (qp), (r1), (r) >> 3, (i) + (l), (i) + (l) + (o), 6); } while (0)
+
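+/*
+ * Sketch (illustrative): a typical prologue frame with 2 input, 3 local
+ * and 1 output register and no rotating registers.  ia64_m34 asserts that
+ * alloc opens a new bundle ((code).nins == 0) and that qp is p0.
+ */
+#if 0
+static void
+emit_prologue (Ia64CodegenState code)
+{
+	/* alloc r34 = ar.pfs, 2, 3, 1, 0  --  ar.pfs saved in the first local */
+	ia64_alloc_pred (code, 0, 34, 2, 3, 1, 0);
+}
+#endif
+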
+#define ia64_m35(code, qp, r2, x3, x6) do { read_pr ((code), (qp)); read_gr ((code), (r2)); ia64_emit_ins_5 ((code), IA64_INS_TYPE_M, (qp), 0, (r2), 13, (x6), 27, (x3), 33, (1), 37); } while (0)
+
+#define ia64_mov_to_psr_l_pred(code, qp, r2) ia64_m35 ((code), (qp), (r2), 0, 0x2D)
+#define ia64_mov_to_psr_um_pred(code, qp, r2) ia64_m35 ((code), (qp), (r2), 0, 0x29)
+
+#define ia64_m36(code, qp, r1, x3, x6) do { read_pr ((code), (qp)); write_gr ((code), (r1)); ia64_emit_ins_5 ((code), IA64_INS_TYPE_M, (qp), 0, (r1), 6, (x6), 27, (x3), 33, (1), 37); } while (0)
+
+#define ia64_mov_from_psr_pred(code, qp, r1) ia64_m36 ((code), (qp), (r1), 0, 0x25)
+#define ia64_mov_from_psr_um_pred(code, qp, r1) ia64_m36 ((code), (qp), (r1), 0, 0x21)
+
+#define ia64_m37(code, qp, imm, x3, x2, x4) do { read_pr ((code), (qp)); check_imm21 ((imm)); ia64_emit_ins_7 ((code), IA64_INS_TYPE_M, (qp), 0, (imm) & 0xfffff, 6, (x4), 27, (x2), 31, (x3), 33, sign_bit ((imm)), 36, (0), 37); } while (0)
+
+#define ia64_break_m_pred(code, qp, imm) ia64_m37 ((code), (qp), (imm), 0, 0, 0)
+
+/* The System/Memory Management instruction encodings (M38-M47) are missing */
+
+#define ia64_m48(code, qp, imm, x3, x4, x2, y) do { read_pr ((code), (qp)); check_imm21 ((imm)); ia64_emit_ins_8 ((code), IA64_INS_TYPE_M, (qp), 0, (imm) & 0xfffff, 6, (y), 26, (x4), 27, (x2), 31, (x3), 33, sign_bit ((imm)), 36, (0), 37); } while (0)
+
+#define ia64_nop_m_pred(code, qp, imm) ia64_m48 ((code), (qp), (imm), 0, 1, 0, 0)
+#define ia64_hint_m_pred(code, qp, imm) ia64_m48 ((code), (qp), (imm), 0, 1, 0, 1)
+
+typedef enum {
+ IA64_BWH_SPTK = 0,
+ IA64_BWH_SPNT = 1,
+ IA64_BWH_DPTK = 2,
+ IA64_BWH_DPNT = 3
+} Ia64BranchWhetherHint;
+
+typedef enum {
+ IA64_PH_FEW = 0,
+ IA64_PH_MANY = 1
+} Ia64SeqPrefetchHint;
+
+typedef enum {
+ IA64_DH_NONE = 0,
+ IA64_DH_CLR = 1
+} Ia64BranchCacheDeallocHint;
+
+#define ia64_b1(code, qp, imm, bwh, ph, dh, btype) do { read_pr_branch ((code), (qp)); check_imm21 ((imm)); check_bwh ((bwh)); check_ph ((ph)); check_dh ((dh)); ia64_emit_ins_8 ((code), IA64_INS_TYPE_B, (qp), 0, (btype), 6, (ph), 12, (imm) & 0xfffff, 13, (bwh), 33, (dh), 35, sign_bit ((imm)), 36, (4), 37); } while (0)
+
+#define ia64_br_cond_hint_pred(code, qp, disp, bwh, ph, dh) ia64_b1 ((code), (qp), (disp), (bwh), (ph), (dh), 0)
+#define ia64_br_wexit_hint_pred(code, qp, disp, bwh, ph, dh) ia64_b1 ((code), (qp), (disp), (bwh), (ph), (dh), 2)
+#define ia64_br_wtop_hint_pred(code, qp, disp, bwh, ph, dh) ia64_b1 ((code), (qp), (disp), (bwh), (ph), (dh), 3)
+
+#define ia64_b2(code, qp, imm, bwh, ph, dh, btype) do { read_pr ((code), (qp)); check_imm21 ((imm)); check_assert ((qp) == 0); check_bwh ((bwh)); check_ph ((ph)); check_dh ((dh)); ia64_emit_ins_8 ((code), IA64_INS_TYPE_B, (qp), 0, (btype), 6, (ph), 12, (imm) & 0xfffff, 13, (bwh), 33, (dh), 35, sign_bit ((imm)), 36, (4), 37); } while (0)
+
+#define ia64_br_cloop_hint_pred(code, qp, disp, bwh, ph, dh) ia64_b2 ((code), (qp), (disp), (bwh), (ph), (dh), 5)
+#define ia64_br_cexit_hint_pred(code, qp, disp, bwh, ph, dh) ia64_b2 ((code), (qp), (disp), (bwh), (ph), (dh), 6)
+#define ia64_br_ctop_hint_pred(code, qp, disp, bwh, ph, dh) ia64_b2 ((code), (qp), (disp), (bwh), (ph), (dh), 7)
+
+#define ia64_b3(code, qp, b1, imm, bwh, ph, dh) do { read_pr ((code), (qp)); write_br ((code), (b1)); check_imm21 ((imm)); check_bwh ((bwh)); check_ph ((ph)); check_dh ((dh)); ia64_emit_ins_8 ((code), IA64_INS_TYPE_B, (qp), 0, (b1), 6, (ph), 12, (imm) & 0xfffff, 13, (bwh), 33, (dh), 35, sign_bit ((imm)), 36, (5), 37); ia64_begin_bundle ((code)); } while (0)
+
+#define ia64_br_call_hint_pred(code, qp, b1, disp, bwh, ph, dh) ia64_b3 ((code), (qp), (b1), (disp), (bwh), (ph), (dh))
+
+#define ia64_b4(code, qp, b2, bwh, ph, dh, x6, btype) do { read_pr ((code), (qp)); read_br_branch ((code), (b2)); check_bwh ((bwh)); check_ph ((ph)); check_dh ((dh)); ia64_emit_ins_8 ((code), IA64_INS_TYPE_B, (qp), 0, (btype), 6, (ph), 12, (b2), 13, (x6), 27, (bwh), 33, (dh), 35, (0), 37); } while (0)
+
+#define ia64_br_cond_reg_hint_pred(code, qp, b1, bwh, ph, dh) ia64_b4 ((code), (qp), (b1), (bwh), (ph), (dh), 0x20, 0)
+#define ia64_br_ia_reg_hint_pred(code, qp, b1, bwh, ph, dh) ia64_b4 ((code), (qp), (b1), (bwh), (ph), (dh), 0x20, 1)
+#define ia64_br_ret_reg_hint_pred(code, qp, b1, bwh, ph, dh) ia64_b4 ((code), (qp), (b1), (bwh), (ph), (dh), 0x21, 4)
+
+#define ia64_b5(code, qp, b1, b2, bwh, ph, dh) do { read_pr ((code), (qp)); write_br ((code), (b1)); read_br_branch ((code), (b2)); check_bwh ((bwh)); check_ph ((ph)); check_dh ((dh)); ia64_emit_ins_7 ((code), IA64_INS_TYPE_B, (qp), 0, (b1), 6, (ph), 12, (b2), 13, ((bwh) * 2) + 1, 32, (dh), 35, (1), 37); ia64_begin_bundle ((code)); } while (0)
+
+#define ia64_br_call_reg_hint_pred(code, qp, b1, b2, bwh, ph, dh) ia64_b5 ((code), (qp), (b1), (b2), (bwh), (ph), (dh))
+
+/* Pseudo ops */
+
+#define ia64_br_cond_pred(code, qp, disp) ia64_br_cond_hint_pred (code, qp, disp, 0, 0, 0)
+#define ia64_br_wexit_pred(code, qp, disp) ia64_br_wexit_hint_pred (code, qp, disp, 0, 0, 0)
+#define ia64_br_wtop_pred(code, qp, disp) ia64_br_wtop_hint_pred (code, qp, disp, 0, 0, 0)
+
+#define ia64_br_cloop_pred(code, qp, disp) ia64_br_cloop_hint_pred (code, qp, disp, 0, 0, 0)
+#define ia64_br_cexit_pred(code, qp, disp) ia64_br_cexit_hint_pred (code, qp, disp, 0, 0, 0)
+#define ia64_br_ctop_pred(code, qp, disp) ia64_br_ctop_hint_pred (code, qp, disp, 0, 0, 0)
+
+#define ia64_br_call_pred(code, qp, b1, disp) ia64_br_call_hint_pred (code, qp, b1, disp, 0, 0, 0)
+
+#define ia64_br_cond_reg_pred(code, qp, b1) ia64_br_cond_reg_hint_pred (code, qp, b1, 0, 0, 0)
+#define ia64_br_ia_reg_pred(code, qp, b1) ia64_br_ia_reg_hint_pred (code, qp, b1, 0, 0, 0)
+#define ia64_br_ret_reg_pred(code, qp, b1) ia64_br_ret_reg_hint_pred (code, qp, b1, 0, 0, 0)
+
+#define ia64_br_call_reg_pred(code, qp, b1, b2) ia64_br_call_reg_hint_pred (code, qp, b1, b2, 0, 0, 0)
+
+/* End of pseudo ops */
+
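+/*
+ * Sketch (illustrative): an indirect call through a branch register,
+ * combining the three hint enums above; b0 receives the return link.
+ */
+#if 0
+static void
+emit_indirect_call (Ia64CodegenState code, int target_breg)
+{
+	/* br.call.sptk.few b0 = b<target_breg> */
+	ia64_br_call_reg_hint_pred (code, 0, 0, target_breg,
+				    IA64_BWH_SPTK, IA64_PH_FEW, IA64_DH_NONE);
+}
+#endif
+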
+typedef enum {
+ IA64_IPWH_SPTK = 0,
+ IA64_IPWH_LOOP = 1,
+ IA64_IPWH_DPTK = 2,
+ IA64_IPWH_EXIT = 3
+} Ia64IPRelativeBranchWhetherHint;
+
+/* B6 and B7 are missing */
+
+#define ia64_b8(code, qp, x6) do { read_pr ((code), (qp)); ia64_emit_ins_3 ((code), IA64_INS_TYPE_B, (qp), 0, (x6), 27, (0), 37); } while (0)
+
+#define ia64_cover_pred(code, qp) ia64_b8 ((code), (qp), 0x02)
+#define ia64_clrrrb_pred(code, qp) ia64_b8 ((code), (qp), 0x04)
+#define ia64_clrrrb_pr_pred(code, qp) ia64_b8 ((code), (qp), 0x05)
+#define ia64_rfi_pred(code, qp) ia64_b8 ((code), (qp), 0x08)
+#define ia64_bsw_0_pred(code, qp) ia64_b8 ((code), (qp), 0x0C)
+#define ia64_bsw_1_pred(code, qp) ia64_b8 ((code), (qp), 0x0D)
+#define ia64_epc_pred(code, qp) ia64_b8 ((code), (qp), 0x10)
+
+#define ia64_b9(code, qp, imm, opcode, x6) do { read_pr ((code), (qp)); check_imm21 ((imm)); ia64_emit_ins_5 ((code), IA64_INS_TYPE_B, (qp), 0, (imm) & 0xfffff, 6, (x6), 27, ((imm) >> 20) & 0x1, 36, (opcode), 37); } while (0)
+
+#define ia64_break_b_pred(code, qp, imm) ia64_b9 ((code), (qp), (imm), 0, 0x00)
+#define ia64_nop_b_pred(code, qp, imm) ia64_b9 ((code), (qp), (imm), 2, 0x00)
+#define ia64_hint_b_pred(code, qp, imm) ia64_b9 ((code), (qp), (imm), 2, 0x01)
+
+/*
+ * F-Unit Instructions
+ */
+
+#define ia64_f1(code, qp, f1, f3, f4, f2, sf, opcode, x) do { read_pr ((code), (qp)); check_sf ((sf)); write_fr ((code), (f1)); read_fr ((code), (f2)); read_fr ((code), (f3)); read_fr ((code), (f4)); ia64_emit_ins_8 ((code), IA64_INS_TYPE_F, (qp), 0, (f1), 6, (f2), 13, (f3), 20, (f4), 27, (sf), 34, (x), 36, (opcode), 37); } while (0)
+
+#define ia64_fma_sf_pred(code, qp, f1, f3, f4, f2, sf) ia64_f1 ((code), (qp), (f1), (f3), (f4), (f2), (sf), 8, 0)
+#define ia64_fma_s_sf_pred(code, qp, f1, f3, f4, f2, sf) ia64_f1 ((code), (qp), (f1), (f3), (f4), (f2), (sf), 8, 1)
+#define ia64_fma_d_sf_pred(code, qp, f1, f3, f4, f2, sf) ia64_f1 ((code), (qp), (f1), (f3), (f4), (f2), (sf), 9, 0)
+#define ia64_fpma_sf_pred(code, qp, f1, f3, f4, f2, sf) ia64_f1 ((code), (qp), (f1), (f3), (f4), (f2), (sf), 9, 1)
+#define ia64_fms_sf_pred(code, qp, f1, f3, f4, f2, sf) ia64_f1 ((code), (qp), (f1), (f3), (f4), (f2), (sf), 0xA, 0)
+#define ia64_fms_s_sf_pred(code, qp, f1, f3, f4, f2, sf) ia64_f1 ((code), (qp), (f1), (f3), (f4), (f2), (sf), 0xA, 1)
+#define ia64_fms_d_sf_pred(code, qp, f1, f3, f4, f2, sf) ia64_f1 ((code), (qp), (f1), (f3), (f4), (f2), (sf), 0xB, 0)
+#define ia64_fpms_sf_pred(code, qp, f1, f3, f4, f2, sf) ia64_f1 ((code), (qp), (f1), (f3), (f4), (f2), (sf), 0xB, 1)
+#define ia64_fnma_sf_pred(code, qp, f1, f3, f4, f2, sf) ia64_f1 ((code), (qp), (f1), (f3), (f4), (f2), (sf), 0xC, 0)
+#define ia64_fnma_s_sf_pred(code, qp, f1, f3, f4, f2, sf) ia64_f1 ((code), (qp), (f1), (f3), (f4), (f2), (sf), 0xC, 1)
+#define ia64_fnma_d_sf_pred(code, qp, f1, f3, f4, f2, sf) ia64_f1 ((code), (qp), (f1), (f3), (f4), (f2), (sf), 0xD, 0)
+#define ia64_fpnma_sf_pred(code, qp, f1, f3, f4, f2, sf) ia64_f1 ((code), (qp), (f1), (f3), (f4), (f2), (sf), 0xD, 1)
+
+/* Pseudo ops */
+#define ia64_fnorm_s_sf_pred(code, qp, f1, f3, sf) ia64_fma_s_sf_pred ((code), (qp), (f1), (f3), 1, 0, (sf))
+#define ia64_fnorm_d_sf_pred(code, qp, f1, f3, sf) ia64_fma_d_sf_pred ((code), (qp), (f1), (f3), 1, 0, (sf))
+
+#define ia64_f2(code, qp, f1, f3, f4, f2, opcode, x, x2) do { read_pr ((code), (qp)); write_fr ((code), (f1)); read_fr ((code), (f3)); read_fr ((code), (f4)); read_fr ((code), (f2)); ia64_emit_ins_8 ((code), IA64_INS_TYPE_F, (qp), 0, (f1), 6, (f2), 13, (f3), 20, (f4), 27, (x2), 34, (x), 36, (opcode), 37); } while (0)
+
+#define ia64_xma_l_pred(code, qp, f1, f3, f4, f2) ia64_f2 ((code), (qp), (f1), (f3), (f4), (f2), 0xE, 1, 0)
+#define ia64_xma_h_pred(code, qp, f1, f3, f4, f2) ia64_f2 ((code), (qp), (f1), (f3), (f4), (f2), 0xE, 1, 3)
+#define ia64_xma_hu_pred(code, qp, f1, f3, f4, f2) ia64_f2 ((code), (qp), (f1), (f3), (f4), (f2), 0xE, 1, 2)
+
+/* Pseudo ops */
+#define ia64_xmpy_l_pred(code, qp, f1, f3, f4) ia64_xma_l_pred ((code), (qp), (f1), (f3), (f4), 0)
+#define ia64_xmpy_lu_pred(code, qp, f1, f3, f4) ia64_xma_l_pred ((code), (qp), (f1), (f3), (f4), 0)
+#define ia64_xmpy_h_pred(code, qp, f1, f3, f4) ia64_xma_h_pred ((code), (qp), (f1), (f3), (f4), 0)
+#define ia64_xmpy_hu_pred(code, qp, f1, f3, f4) ia64_xma_hu_pred ((code), (qp), (f1), (f3), (f4), 0)
+
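+/*
+ * Sketch (illustrative): there is no integer multiply outside the F-unit,
+ * so the usual idiom round-trips through the FP significands with
+ * setf.sig / xmpy.l / getf.sig (setf/getf are defined above as M18/M19).
+ */
+#if 0
+static void
+emit_int_mul (Ia64CodegenState code, int dest_reg, int src1_reg, int src2_reg)
+{
+	ia64_setf_sig_pred (code, 0, 6, src1_reg);	/* f6 = significand(src1) */
+	ia64_setf_sig_pred (code, 0, 7, src2_reg);	/* f7 = significand(src2) */
+	ia64_xmpy_l_pred (code, 0, 6, 6, 7);		/* f6 = low 64 bits of f6 * f7 */
+	ia64_getf_sig_pred (code, 0, dest_reg, 6);	/* dest = significand(f6) */
+}
+#endif
+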
+#define ia64_f3(code, qp, f1, f3, f4, f2, opcode, x) do { read_pr ((code), (qp)); write_fr ((code), (f1)); read_fr ((code), (f3)); read_fr ((code), (f4)); read_fr ((code), (f2)); ia64_emit_ins_7 ((code), IA64_INS_TYPE_F, (qp), 0, (f1), 6, (f2), 13, (f3), 20, (f4), 27, (x), 36, (opcode), 37); } while (0)
+
+#define ia64_fselect_pred(code, qp, f1, f3, f4, f2) ia64_f3 ((code), (qp), (f1), (f3), (f4), (f2), 0xE, 0)
+
+#define ia64_f4(code, qp, p1, p2, f2, f3, sf, opcode, ra, rb, ta) do { read_pr ((code), (qp)); read_fr ((code), (f2)); read_fr ((code), (f3)); write_pr_fp ((code), (p1)); write_pr_fp ((code), (p2)); ia64_emit_ins_10 ((code), IA64_INS_TYPE_F, (qp), 0, (p1), 6, (ta), 12, (f2), 13, (f3), 20, (p2), 27, (ra), 33, (sf), 34, (rb), 36, (opcode), 37); } while (0)
+
+#define ia64_fcmp_eq_sf_pred(code, qp, p1, p2, f2, f3, sf) ia64_f4 ((code), (qp), (p1), (p2), (f2), (f3), (sf), 0x4, 0, 0, 0)
+#define ia64_fcmp_lt_sf_pred(code, qp, p1, p2, f2, f3, sf) ia64_f4 ((code), (qp), (p1), (p2), (f2), (f3), (sf), 0x4, 0, 1, 0)
+#define ia64_fcmp_le_sf_pred(code, qp, p1, p2, f2, f3, sf) ia64_f4 ((code), (qp), (p1), (p2), (f2), (f3), (sf), 0x4, 1, 0, 0)
+#define ia64_fcmp_unord_sf_pred(code, qp, p1, p2, f2, f3, sf) ia64_f4 ((code), (qp), (p1), (p2), (f2), (f3), (sf), 0x4, 1, 1, 0)
+#define ia64_fcmp_eq_unc_sf_pred(code, qp, p1, p2, f2, f3, sf) ia64_f4 ((code), (qp), (p1), (p2), (f2), (f3), (sf), 0x4, 0, 0, 1)
+#define ia64_fcmp_lt_unc_sf_pred(code, qp, p1, p2, f2, f3, sf) ia64_f4 ((code), (qp), (p1), (p2), (f2), (f3), (sf), 0x4, 0, 1, 1)
+#define ia64_fcmp_le_unc_sf_pred(code, qp, p1, p2, f2, f3, sf) ia64_f4 ((code), (qp), (p1), (p2), (f2), (f3), (sf), 0x4, 1, 0, 1)
+#define ia64_fcmp_unord_unc_sf_pred(code, qp, p1, p2, f2, f3, sf) ia64_f4 ((code), (qp), (p1), (p2), (f2), (f3), (sf), 0x4, 1, 1, 1)
+
+/* Pseudo ops */
+#define ia64_fcmp_gt_sf_pred(code, qp, p1, p2, f2, f3, sf) ia64_fcmp_lt_sf_pred ((code), (qp), (p1), (p2), (f3), (f2), (sf))
+#define ia64_fcmp_ge_sf_pred(code, qp, p1, p2, f2, f3, sf) ia64_fcmp_le_sf_pred ((code), (qp), (p1), (p2), (f3), (f2), (sf))
+#define ia64_fcmp_ne_sf_pred(code, qp, p1, p2, f2, f3, sf) ia64_fcmp_eq_sf_pred ((code), (qp), (p2), (p1), (f2), (f3), (sf))
+#define ia64_fcmp_nlt_sf_pred(code, qp, p1, p2, f2, f3, sf) ia64_fcmp_lt_sf_pred ((code), (qp), (p2), (p1), (f2), (f3), (sf))
+#define ia64_fcmp_nle_sf_pred(code, qp, p1, p2, f2, f3, sf) ia64_fcmp_le_sf_pred ((code), (qp), (p2), (p1), (f2), (f3), (sf))
+#define ia64_fcmp_ngt_sf_pred(code, qp, p1, p2, f2, f3, sf) ia64_fcmp_lt_sf_pred ((code), (qp), (p2), (p1), (f3), (f2), (sf))
+#define ia64_fcmp_nge_sf_pred(code, qp, p1, p2, f2, f3, sf) ia64_fcmp_le_sf_pred ((code), (qp), (p2), (p1), (f3), (f2), (sf))
+#define ia64_fcmp_ord_sf_pred(code, qp, p1, p2, f2, f3, sf) ia64_fcmp_unord_sf_pred ((code), (qp), (p2), (p1), (f2), (f3), (sf))
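+/* None of these pseudo ops needs a new encoding: gt/ge swap the source
+   registers of lt/le, while the n* and ord forms swap the two target
+   predicates, e.g. fcmp.ne p1, p2 = f2, f3 is fcmp.eq p2, p1 = f2, f3. */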
+
+#define ia64_f5(code, qp, p1, p2, f2, fclass, opcode, ta) do { read_pr ((code), (qp)); write_pr_fp ((code), (p1)); write_pr_fp ((code), (p2)); read_fr ((code), (f2)); ia64_emit_ins_8 ((code), IA64_INS_TYPE_F, (qp), 0, (p1), 6, (ta), 12, (f2), 13, (((guint64)(fclass)) >> 2) & 0x7f, 20, (p2), 27, ((guint64)(fclass)) & 0x3, 33, (opcode), 37); } while (0)
+
+#define ia64_fclass_m_pred(code, qp, p1, p2, f2, fclass) ia64_f5 ((code), (qp), (p1), (p2), (f2), (fclass), 5, 0)
+#define ia64_fclass_m_unc_pred(code, qp, p1, p2, f2, fclass) ia64_f5 ((code), (qp), (p1), (p2), (f2), (fclass), 5, 1)
+
+#define ia64_f6(code, qp, f1, p2, f2, f3, sf, opcode, x, q) do { read_pr ((code), (qp)); write_fr ((code), (f1)); write_pr_fp ((code), (p2)); read_fr ((code), (f2)); read_fr ((code), (f3)); ia64_emit_ins_9 ((code), IA64_INS_TYPE_F, (qp), 0, (f1), 6, (f2), 13, (f3), 20, (p2), 27, (x), 33, (sf), 34, (q), 36, (opcode), 37); } while (0)
+
+#define ia64_frcpa_sf_pred(code, qp, f1, p2, f2, f3, sf) ia64_f6 ((code), (qp), (f1), (p2), (f2), (f3), (sf), 0, 1, 0)
+#define ia64_fprcpa_sf_pred(code, qp, f1, p2, f2, f3, sf) ia64_f6 ((code), (qp), (f1), (p2), (f2), (f3), (sf), 1, 1, 0)
+
+#define ia64_f7(code, qp, f1, p2, f3, sf, opcode, x, q) do { read_pr ((code), (qp)); write_fr ((code), (f1)); write_pr_fp ((code), (p2)); read_fr ((code), (f3)); ia64_emit_ins_8 ((code), IA64_INS_TYPE_F, (qp), 0, (f1), 6, (f3), 20, (p2), 27, (x), 33, (sf), 34, (q), 36, (opcode), 37); } while (0)
+
+#define ia64_frsqrta_sf_pred(code, qp, f1, p2, f3, sf) ia64_f7 ((code), (qp), (f1), (p2), (f3), (sf), 0, 1, 1)
+#define ia64_fprsqrta_sf_pred(code, qp, f1, p2, f3, sf) ia64_f7 ((code), (qp), (f1), (p2), (f3), (sf), 1, 1, 1)
+
+#define ia64_f8(code, qp, f1, f2, f3, sf, opcode, x, x6) do { read_pr ((code), (qp)); check_sf ((sf)); write_fr ((code), (f1)); read_fr ((code), (f2)); read_fr ((code), (f3)); ia64_emit_ins_8 ((code), IA64_INS_TYPE_F, (qp), 0, (f1), 6, (f2), 13, (f3), 20, (x6), 27, (x), 33, (sf), 34, (opcode), 37); } while (0)
+
+#define ia64_fmin_sf_pred(code, qp, f1, f2, f3, sf) ia64_f8 ((code), (qp), (f1), (f2), (f3), (sf), 0, 0, 0x14)
+#define ia64_fman_sf_pred(code, qp, f1, f2, f3, sf) ia64_f8 ((code), (qp), (f1), (f2), (f3), (sf), 0, 0, 0x15)
+#define ia64_famin_sf_pred(code, qp, f1, f2, f3, sf) ia64_f8 ((code), (qp), (f1), (f2), (f3), (sf), 0, 0, 0x16)
+#define ia64_famax_sf_pred(code, qp, f1, f2, f3, sf) ia64_f8 ((code), (qp), (f1), (f2), (f3), (sf), 0, 0, 0x17)
+#define ia64_fpmin_sf_pred(code, qp, f1, f2, f3, sf) ia64_f8 ((code), (qp), (f1), (f2), (f3), (sf), 1, 0, 0x14)
+#define ia64_fpman_sf_pred(code, qp, f1, f2, f3, sf) ia64_f8 ((code), (qp), (f1), (f2), (f3), (sf), 1, 0, 0x15)
+#define ia64_fpamin_sf_pred(code, qp, f1, f2, f3, sf) ia64_f8 ((code), (qp), (f1), (f2), (f3), (sf), 1, 0, 0x16)
+#define ia64_fpamax_sf_pred(code, qp, f1, f2, f3, sf) ia64_f8 ((code), (qp), (f1), (f2), (f3), (sf), 1, 0, 0x17)
+#define ia64_fpcmp_eq_sf_pred(code, qp, f1, f2, f3, sf) ia64_f8 ((code), (qp), (f1), (f2), (f3), (sf), 1, 0, 0x30)
+#define ia64_fpcmp_lt_sf_pred(code, qp, f1, f2, f3, sf) ia64_f8 ((code), (qp), (f1), (f2), (f3), (sf), 1, 0, 0x31)
+#define ia64_fpcmp_le_sf_pred(code, qp, f1, f2, f3, sf) ia64_f8 ((code), (qp), (f1), (f2), (f3), (sf), 1, 0, 0x32)
+#define ia64_fpcmp_unord_sf_pred(code, qp, f1, f2, f3, sf) ia64_f8 ((code), (qp), (f1), (f2), (f3), (sf), 1, 0, 0x33)
+#define ia64_fpcmp_neq_sf_pred(code, qp, f1, f2, f3, sf) ia64_f8 ((code), (qp), (f1), (f2), (f3), (sf), 1, 0, 0x34)
+#define ia64_fpcmp_nlt_sf_pred(code, qp, f1, f2, f3, sf) ia64_f8 ((code), (qp), (f1), (f2), (f3), (sf), 1, 0, 0x35)
+#define ia64_fpcmp_nle_sf_pred(code, qp, f1, f2, f3, sf) ia64_f8 ((code), (qp), (f1), (f2), (f3), (sf), 1, 0, 0x36)
+#define ia64_fpcmp_ord_sf_pred(code, qp, f1, f2, f3, sf) ia64_f8 ((code), (qp), (f1), (f2), (f3), (sf), 1, 0, 0x37)
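+/* Naming note: in the F8 opcode table x6 = 0x15 is fmax (fpmax in the
+   parallel form), so ia64_fman_sf_pred / ia64_fpman_sf_pred above emit
+   fmax / fpmax despite the "fman" spelling. */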
+
+#define ia64_f9(code, qp, f1, f2, f3, opcode, x, x6) do { read_pr ((code), (qp)); write_fr ((code), (f1)); read_fr ((code), (f2)); read_fr ((code), (f3)); ia64_emit_ins_7 ((code), IA64_INS_TYPE_F, (qp), 0, (f1), 6, (f2), 13, (f3), 20, (x6), 27, (x), 33, (opcode), 37); } while (0)
+
+#define ia64_fmerge_s_pred(code, qp, f1, f2, f3) ia64_f9 ((code), (qp), (f1), (f2), (f3), 0, 0, 0x10)
+#define ia64_fmerge_ns_pred(code, qp, f1, f2, f3) ia64_f9 ((code), (qp), (f1), (f2), (f3), 0, 0, 0x11)
+#define ia64_fmerge_se_pred(code, qp, f1, f2, f3) ia64_f9 ((code), (qp), (f1), (f2), (f3), 0, 0, 0x12)
+#define ia64_fmix_lr_pred(code, qp, f1, f2, f3) ia64_f9 ((code), (qp), (f1), (f2), (f3), 0, 0, 0x39)
+#define ia64_fmix_r_pred(code, qp, f1, f2, f3) ia64_f9 ((code), (qp), (f1), (f2), (f3), 0, 0, 0x3A)
+#define ia64_fmix_l_pred(code, qp, f1, f2, f3) ia64_f9 ((code), (qp), (f1), (f2), (f3), 0, 0, 0x3B)
+#define ia64_fsxt_r_pred(code, qp, f1, f2, f3) ia64_f9 ((code), (qp), (f1), (f2), (f3), 0, 0, 0x3C)
+#define ia64_fsxt_l_pred(code, qp, f1, f2, f3) ia64_f9 ((code), (qp), (f1), (f2), (f3), 0, 0, 0x3D)
+#define ia64_fpack_pred(code, qp, f1, f2, f3) ia64_f9 ((code), (qp), (f1), (f2), (f3), 0, 0, 0x28)
+#define ia64_fswap_pred(code, qp, f1, f2, f3) ia64_f9 ((code), (qp), (f1), (f2), (f3), 0, 0, 0x34)
+#define ia64_fswap_nl_pred(code, qp, f1, f2, f3) ia64_f9 ((code), (qp), (f1), (f2), (f3), 0, 0, 0x35)
+#define ia64_fswap_nr_pred(code, qp, f1, f2, f3) ia64_f9 ((code), (qp), (f1), (f2), (f3), 0, 0, 0x36)
+#define ia64_fand_pred(code, qp, f1, f2, f3) ia64_f9 ((code), (qp), (f1), (f2), (f3), 0, 0, 0x2C)
+#define ia64_fandcm_pred(code, qp, f1, f2, f3) ia64_f9 ((code), (qp), (f1), (f2), (f3), 0, 0, 0x2D)
+#define ia64_for_pred(code, qp, f1, f2, f3) ia64_f9 ((code), (qp), (f1), (f2), (f3), 0, 0, 0x2E)
+#define ia64_fxor_pred(code, qp, f1, f2, f3) ia64_f9 ((code), (qp), (f1), (f2), (f3), 0, 0, 0x2F)
+#define ia64_fpmerge_s_pred(code, qp, f1, f2, f3) ia64_f9 ((code), (qp), (f1), (f2), (f3), 1, 0, 0x10)
+#define ia64_fpmerge_ns_pred(code, qp, f1, f2, f3) ia64_f9 ((code), (qp), (f1), (f2), (f3), 1, 0, 0x11)
+#define ia64_fpmerge_se_pred(code, qp, f1, f2, f3) ia64_f9 ((code), (qp), (f1), (f2), (f3), 1, 0, 0x12)
+
+/* Pseudo ops */
+#define ia64_fmov_pred(code, qp, f1, f3) ia64_fmerge_s_pred ((code), (qp), (f1), (f3), (f3))
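+/* fmov fd = fs is fmerge.s fd = fs, fs: the sign, exponent and significand
+   all come from the same source register, yielding a plain copy. */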
+
+#define ia64_f10(code, qp, f1, f2, sf, opcode, x, x6) do { read_pr ((code), (qp)); check_sf ((sf)); write_fr ((code), (f1)); read_fr ((code), (f2)); ia64_emit_ins_7 ((code), IA64_INS_TYPE_F, (qp), 0, (f1), 6, (f2), 13, (x6), 27, (x), 33, (sf), 34, (opcode), 37); } while (0)
+
+#define ia64_fcvt_fx_sf_pred(code, qp, f1, f2, sf) ia64_f10 ((code), (qp), (f1), (f2), (sf), 0, 0, 0x18)
+#define ia64_fcvt_fxu_sf_pred(code, qp, f1, f2, sf) ia64_f10 ((code), (qp), (f1), (f2), (sf), 0, 0, 0x19)
+#define ia64_fcvt_fx_trunc_sf_pred(code, qp, f1, f2, sf) ia64_f10 ((code), (qp), (f1), (f2), (sf), 0, 0, 0x1A)
+#define ia64_fcvt_fxu_trunc_sf_pred(code, qp, f1, f2, sf) ia64_f10 ((code), (qp), (f1), (f2), (sf), 0, 0, 0x1B)
+#define ia64_fpcvt_fx_sf_pred(code, qp, f1, f2, sf) ia64_f10 ((code), (qp), (f1), (f2), (sf), 1, 0, 0x18)
+#define ia64_fpcvt_fxu_sf_pred(code, qp, f1, f2, sf) ia64_f10 ((code), (qp), (f1), (f2), (sf), 1, 0, 0x19)
+#define ia64_fpcvt_fx_trunc_sf_pred(code, qp, f1, f2, sf) ia64_f10 ((code), (qp), (f1), (f2), (sf), 1, 0, 0x1A)
+#define ia64_fpcvt_fxu_trunc_sf_pred(code, qp, f1, f2, sf) ia64_f10 ((code), (qp), (f1), (f2), (sf), 1, 0, 0x1B)
+
+#define ia64_f11(code, qp, f1, f2, opcode, x, x6) do { read_pr ((code), (qp)); write_fr ((code), (f1)); read_fr ((code), (f2)); ia64_emit_ins_6 ((code), IA64_INS_TYPE_F, (qp), 0, (f1), 6, (f2), 13, (x6), 27, (x), 34, (opcode), 37); } while (0)
+
+#define ia64_fcvt_xf_pred(code, qp, f1, f2) ia64_f11 ((code), (qp), (f1), (f2), 0, 0, 0x1C)
+
+#define ia64_f12(code, qp, amask, omask, sf, opcode, x, x6) do { read_pr ((code), (qp)); ia64_emit_ins_7 ((code), IA64_INS_TYPE_F, (qp), 0, (amask) & 0x3f, 13, (omask) & 0x3f, 20, (x6), 27, (x), 33, (sf), 34, (opcode), 37); } while (0)
+
+#define ia64_fsetc_sf_pred(code, qp, amask, omask, sf) ia64_f12 ((code), (qp), (amask), (omask), (sf), 0, 0, 0x04)
+
+#define ia64_f13(code, qp, sf, opcode, x, x6) do { read_pr ((code), (qp)); ia64_emit_ins_5 ((code), IA64_INS_TYPE_F, (qp), 0, (x6), 27, (x), 33, (sf), 34, (opcode), 37); } while (0)
+
+#define ia64_fclrf_sf_pred(code, qp, sf) ia64_f13 ((code), (qp), (sf), 0, 0, 0x05)
+
+#define ia64_f14(code, qp, imm, sf, opcode, x, x6) do { read_pr ((code), (qp)); check_imm21 ((imm)); ia64_emit_ins_7 ((code), IA64_INS_TYPE_F, (qp), 0, (imm) & 0xfffff, 6, (x6), 27, (x), 33, (sf), 34, sign_bit ((imm)), 36, (opcode), 37); } while (0)
+
+#define ia64_fchkf_sf_pred(code, qp, disp, sf) ia64_f14 ((code), (qp), (disp), (sf), 0, 0, 0x8)
+
+#define ia64_f15(code, qp, imm, opcode, x, x6) do { read_pr ((code), (qp)); check_imm21 ((imm)); ia64_emit_ins_6 ((code), IA64_INS_TYPE_F, (qp), 0, (imm) & 0xfffff, 6, (x6), 27, (x), 33, ((imm) >> 20) & 0x1, 36, (opcode), 37); } while (0)
+
+#define ia64_break_f_pred(code, qp, imm) ia64_f15 ((code), (qp), (imm), 0, 0, 0x0)
+
+/*
+ * X-UNIT ENCODINGS
+ */
+
+#define ia64_x1(code, qp, imm, x3, x6) do { read_pr ((code), (qp)); check_imm62 ((imm)); ia64_begin_bundle (code); ia64_emit_ins_1 ((code), IA64_INS_TYPE_LX, ((guint64)(imm) >> 21) & 0x1ffffffffffULL, 0); ia64_emit_ins_6 ((code), IA64_INS_TYPE_LX, (qp), 0, (guint64)(imm) & 0xfffff, (6), (x6), 27, (x3), 33, ((guint64)(imm) >> 20) & 0x1, 36, (0), 37); } while (0)
+
+#define ia64_break_x_pred(code, qp, imm) ia64_x1 ((code), (qp), (imm), 0, 0x00)
+
+#define ia64_x2(code, qp, r1, imm, vc) do { if (code.nins > IA64_INS_BUFFER_SIZE - 2) ia64_emit_bundle (&(code), FALSE); read_pr ((code), (qp)); write_gr ((code), (r1)); ia64_emit_ins_1 ((code), IA64_INS_TYPE_LX, ((gint64)(imm) >> 22) & 0x1ffffffffffULL, 0); ia64_emit_ins_9 ((code), IA64_INS_TYPE_LX, (qp), 0, (r1), 6, (gint64)(imm) & 0x7f, (13), (vc), 20, ((gint64)(imm) >> 21) & 0x1, 21, ((gint64)(imm) >> 16) & 0x1f, 22, ((gint64)(imm) >> 7) & 0x1ff, 27, ((gint64)(imm) >> 63) & 0x1, 36, (6), 37); } while (0)
+
+#define ia64_movl_pred(code, qp, r1, imm) ia64_x2 ((code), (qp), (r1), (imm), 0)
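+/* movl is the only single-instruction way to materialize an arbitrary
+   64-bit constant; the X2 format spans two slots of a bundle, with the L
+   slot carrying bits 22..62 of the immediate.  A minimal usage sketch,
+   assuming a codegen state `code` set up as elsewhere in this header:
+
+     ia64_movl (code, 2, 0x123456789abcdef0LL);   // movl r2 = imm64
+*/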
+
+#define ia64_x3(code, qp, imm, bwh, ph, dh, btype) do { read_pr ((code), (qp)); ia64_begin_bundle (code); ia64_emit_ins_1 ((code), IA64_INS_TYPE_LX, ((guint64)(imm) >> 20) & 0x1ffffffffffULL, 0); ia64_emit_ins_8 ((code), IA64_INS_TYPE_LX, (qp), 0, (btype), 6, (ph), 12, (guint64)(imm) & 0xfffff, (13), (bwh), 33, (dh), 35, ((guint64)(imm) >> 59) & 0x1, 36, (0xC), 37); } while (0)
+
+#define ia64_brl_cond_hint_pred(code, qp, disp, bwh, ph, dh) ia64_x3 ((code), (qp), (disp), (bwh), (ph), (dh), 0)
+
+#define ia64_x4(code, qp, b1, imm, bwh, ph, dh) do { read_pr ((code), (qp)); write_br ((code), (b1)); ia64_begin_bundle (code); ia64_emit_ins_1 ((code), IA64_INS_TYPE_LX, ((guint64)(imm) >> 20) & 0x1ffffffffffULL, 0); ia64_emit_ins_8 ((code), IA64_INS_TYPE_LX, (qp), 0, (b1), 6, (ph), 12, (guint64)(imm) & 0xfffff, (13), (bwh), 33, (dh), 35, ((guint64)(imm) >> 59) & 0x1, 36, (0xD), 37); } while (0)
+
+#define ia64_brl_call_hint_pred(code, qp, b1, disp, bwh, ph, dh) ia64_x4 ((code), (qp), (b1), (disp), (bwh), (ph), (dh))
+
+#define ia64_x5(code, qp, imm, x3, x6, y) do { read_pr ((code), (qp)); check_imm62 ((imm)); ia64_begin_bundle (code); ia64_emit_ins_1 ((code), IA64_INS_TYPE_LX, ((guint64)(imm) >> 21) & 0x1ffffffffffULL, 0); ia64_emit_ins_7 ((code), IA64_INS_TYPE_LX, (qp), 0, (guint64)(imm) & 0xfffff, (6), (y), 26, (x6), 27, (x3), 33, ((guint64)(imm) >> 20) & 0x1, 36, (0), 37); } while (0)
+
+#define ia64_nop_x_pred(code, qp, imm) ia64_x5 ((code), (qp), (imm), 0, 0x01, 0)
+#define ia64_hint_x_pred(code, qp, imm) ia64_x5 ((code), (qp), (imm), 0, 0x01, 1)
+
+
+/*
+ * Non-predicated instruction variants
+ */
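+/* These wrappers pass qp = 0; predicate register p0 is hardwired to 1
+   (true), so the emitted instruction always executes.  For example,
+   ia64_add (code, 1, 2, 3) emits a plain, unconditional add r1 = r2, r3. */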
+
+
+#define ia64_add(code, r1, r2, r3) ia64_add_pred ((code), 0, r1, r2, r3)
+#define ia64_add1(code, r1, r2, r3) ia64_add1_pred ((code), 0, r1, r2, r3)
+#define ia64_sub(code, r1, r2, r3) ia64_sub_pred ((code), 0, r1, r2, r3)
+#define ia64_sub1(code, r1, r2, r3) ia64_sub1_pred ((code), 0, r1, r2, r3)
+#define ia64_addp4(code, r1, r2, r3) ia64_addp4_pred ((code), 0, r1, r2, r3)
+#define ia64_and(code, r1, r2, r3) ia64_and_pred ((code), 0, r1, r2, r3)
+#define ia64_andcm(code, r1, r2, r3) ia64_andcm_pred ((code), 0, r1, r2, r3)
+#define ia64_or(code, r1, r2, r3) ia64_or_pred ((code), 0, r1, r2, r3)
+#define ia64_xor(code, r1, r2, r3) ia64_xor_pred ((code), 0, r1, r2, r3)
+
+
+#define ia64_shladd(code, r1, r2, r3,count) ia64_shladd_pred ((code), 0, r1, r2, r3,count)
+#define ia64_shladdp4(code, r1, r2, r3,count) ia64_shladdp4_pred ((code), 0, r1, r2, r3,count)
+
+
+#define ia64_sub_imm(code, r1,imm8,r3) ia64_sub_imm_pred ((code), 0, r1,imm8,r3)
+#define ia64_and_imm(code, r1,imm8,r3) ia64_and_imm_pred ((code), 0, r1,imm8,r3)
+#define ia64_andcm_imm(code, r1,imm8,r3) ia64_andcm_imm_pred ((code), 0, r1,imm8,r3)
+#define ia64_or_imm(code, r1,imm8,r3) ia64_or_imm_pred ((code), 0, r1,imm8,r3)
+#define ia64_xor_imm(code, r1,imm8,r3) ia64_xor_imm_pred ((code), 0, r1,imm8,r3)
+
+
+#define ia64_adds_imm(code, r1,imm14,r3) ia64_adds_imm_pred ((code), 0, r1,imm14,r3)
+#define ia64_addp4_imm(code, r1,imm14,r3) ia64_addp4_imm_pred ((code), 0, r1,imm14,r3)
+
+
+#define ia64_addl_imm(code, r1,imm22,r3) ia64_addl_imm_pred ((code), 0, r1,imm22,r3)
+
+
+#define ia64_cmp_lt(code, p1, p2, r2, r3) ia64_cmp_lt_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp_ltu(code, p1, p2, r2, r3) ia64_cmp_ltu_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp_eq(code, p1, p2, r2, r3) ia64_cmp_eq_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp_lt_unc(code, p1, p2, r2, r3) ia64_cmp_lt_unc_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp_ltu_unc(code, p1, p2, r2, r3) ia64_cmp_ltu_unc_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp_eq_unc(code, p1, p2, r2, r3) ia64_cmp_eq_unc_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp_eq_and(code, p1, p2, r2, r3) ia64_cmp_eq_and_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp_eq_or(code, p1, p2, r2, r3) ia64_cmp_eq_or_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp_eq_or_andcm(code, p1, p2, r2, r3) ia64_cmp_eq_or_andcm_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp_ne_and(code, p1, p2, r2, r3) ia64_cmp_ne_and_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp_ne_or(code, p1, p2, r2, r3) ia64_cmp_ne_or_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp_ne_or_andcm(code, p1, p2, r2, r3) ia64_cmp_ne_or_andcm_pred ((code), 0, p1, p2, r2, r3)
+
+#define ia64_cmp4_lt(code, p1, p2, r2, r3) ia64_cmp4_lt_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp4_ltu(code, p1, p2, r2, r3) ia64_cmp4_ltu_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp4_eq(code, p1, p2, r2, r3) ia64_cmp4_eq_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp4_lt_unc(code, p1, p2, r2, r3) ia64_cmp4_lt_unc_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp4_ltu_unc(code, p1, p2, r2, r3) ia64_cmp4_ltu_unc_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp4_eq_unc(code, p1, p2, r2, r3) ia64_cmp4_eq_unc_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp4_eq_and(code, p1, p2, r2, r3) ia64_cmp4_eq_and_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp4_eq_or(code, p1, p2, r2, r3) ia64_cmp4_eq_or_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp4_eq_or_andcm(code, p1, p2, r2, r3) ia64_cmp4_eq_or_andcm_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp4_ne_and(code, p1, p2, r2, r3) ia64_cmp4_ne_and_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp4_ne_or(code, p1, p2, r2, r3) ia64_cmp4_ne_or_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp4_ne_or_andcm(code, p1, p2, r2, r3) ia64_cmp4_ne_or_andcm_pred ((code), 0, p1, p2, r2, r3)
+
+/* Pseudo ops */
+#define ia64_cmp_ne(code, p1, p2, r2, r3) ia64_cmp_ne_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp_le(code, p1, p2, r2, r3) ia64_cmp_le_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp_gt(code, p1, p2, r2, r3) ia64_cmp_gt_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp_ge(code, p1, p2, r2, r3) ia64_cmp_ge_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp_leu(code, p1, p2, r2, r3) ia64_cmp_leu_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp_gtu(code, p1, p2, r2, r3) ia64_cmp_gtu_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp_geu(code, p1, p2, r2, r3) ia64_cmp_geu_pred ((code), 0, p1, p2, r2, r3)
+
+#define ia64_cmp4_ne(code, p1, p2, r2, r3) ia64_cmp4_ne_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp4_le(code, p1, p2, r2, r3) ia64_cmp4_le_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp4_gt(code, p1, p2, r2, r3) ia64_cmp4_gt_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp4_ge(code, p1, p2, r2, r3) ia64_cmp4_ge_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp4_leu(code, p1, p2, r2, r3) ia64_cmp4_leu_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp4_gtu(code, p1, p2, r2, r3) ia64_cmp4_gtu_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp4_geu(code, p1, p2, r2, r3) ia64_cmp4_geu_pred ((code), 0, p1, p2, r2, r3)
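+/* Predication sketch (illustrative; ia64_adds_imm_pred is among the
+   predicated bases defined earlier in this header): set p6/p7 from a
+   compare, then increment r10 only when the compare succeeds:
+
+     ia64_cmp_eq (code, 6, 7, 8, 9);           // cmp.eq p6, p7 = r8, r9
+     ia64_adds_imm_pred (code, 6, 10, 1, 10);  // (p6) adds r10 = 1, r10
+*/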
+
+#define ia64_cmp_gt_and(code, p1, p2, r2, r3) ia64_cmp_gt_and_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp_gt_or(code, p1, p2, r2, r3) ia64_cmp_gt_or_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp_gt_or_andcm(code, p1, p2, r2, r3) ia64_cmp_gt_or_andcm_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp_le_and(code, p1, p2, r2, r3) ia64_cmp_le_and_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp_le_or(code, p1, p2, r2, r3) ia64_cmp_le_or_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp_le_or_andcm(code, p1, p2, r2, r3) ia64_cmp_le_or_andcm_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp_ge_and(code, p1, p2, r2, r3) ia64_cmp_ge_and_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp_ge_or(code, p1, p2, r2, r3) ia64_cmp_ge_or_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp_ge_or_andcm(code, p1, p2, r2, r3) ia64_cmp_ge_or_andcm_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp_lt_and(code, p1, p2, r2, r3) ia64_cmp_lt_and_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp_lt_or(code, p1, p2, r2, r3) ia64_cmp_lt_or_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp_lt_or_andcm(code, p1, p2, r2, r3) ia64_cmp_lt_or_andcm_pred ((code), 0, p1, p2, r2, r3)
+
+#define ia64_cmp4_gt_and(code, p1, p2, r2, r3) ia64_cmp4_gt_and_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp4_gt_or(code, p1, p2, r2, r3) ia64_cmp4_gt_or_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp4_gt_or_andcm(code, p1, p2, r2, r3) ia64_cmp4_gt_or_andcm_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp4_le_and(code, p1, p2, r2, r3) ia64_cmp4_le_and_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp4_le_or(code, p1, p2, r2, r3) ia64_cmp4_le_or_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp4_le_or_andcm(code, p1, p2, r2, r3) ia64_cmp4_le_or_andcm_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp4_ge_and(code, p1, p2, r2, r3) ia64_cmp4_ge_and_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp4_ge_or(code, p1, p2, r2, r3) ia64_cmp4_ge_or_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp4_ge_or_andcm(code, p1, p2, r2, r3) ia64_cmp4_ge_or_andcm_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp4_lt_and(code, p1, p2, r2, r3) ia64_cmp4_lt_and_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp4_lt_or(code, p1, p2, r2, r3) ia64_cmp4_lt_or_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp4_lt_or_andcm(code, p1, p2, r2, r3) ia64_cmp4_lt_or_andcm_pred ((code), 0, p1, p2, r2, r3)
+
+
+#define ia64_cmp_lt_imm(code, p1, p2, imm8, r3) ia64_cmp_lt_imm_pred ((code), 0, p1, p2, imm8, r3)
+#define ia64_cmp_ltu_imm(code, p1, p2, imm8, r3) ia64_cmp_ltu_imm_pred ((code), 0, p1, p2, imm8, r3)
+#define ia64_cmp_eq_imm(code, p1, p2, imm8, r3) ia64_cmp_eq_imm_pred ((code), 0, p1, p2, imm8, r3)
+#define ia64_cmp_lt_unc_imm(code, p1, p2, imm8, r3) ia64_cmp_lt_unc_imm_pred ((code), 0, p1, p2, imm8, r3)
+#define ia64_cmp_ltu_unc_imm(code, p1, p2, imm8, r3) ia64_cmp_ltu_unc_imm_pred ((code), 0, p1, p2, imm8, r3)
+#define ia64_cmp_eq_unc_imm(code, p1, p2, imm8, r3) ia64_cmp_eq_unc_imm_pred ((code), 0, p1, p2, imm8, r3)
+#define ia64_cmp_eq_and_imm(code, p1, p2, imm8, r3) ia64_cmp_eq_and_imm_pred ((code), 0, p1, p2, imm8, r3)
+#define ia64_cmp_eq_or_imm(code, p1, p2, imm8, r3) ia64_cmp_eq_or_imm_pred ((code), 0, p1, p2, imm8, r3)
+#define ia64_cmp_eq_or_andcm_imm(code, p1, p2, imm8, r3) ia64_cmp_eq_or_andcm_imm_pred ((code), 0, p1, p2, imm8, r3)
+#define ia64_cmp_ne_and_imm(code, p1, p2, imm8, r3) ia64_cmp_ne_and_imm_pred ((code), 0, p1, p2, imm8, r3)
+#define ia64_cmp_ne_or_imm(code, p1, p2, imm8, r3) ia64_cmp_ne_or_imm_pred ((code), 0, p1, p2, imm8, r3)
+#define ia64_cmp_ne_or_andcm_imm(code, p1, p2, imm8, r3) ia64_cmp_ne_or_andcm_imm_pred ((code), 0, p1, p2, imm8, r3)
+
+#define ia64_cmp4_lt_imm(code, p1, p2, imm8, r3) ia64_cmp4_lt_imm_pred ((code), 0, p1, p2, imm8, r3)
+#define ia64_cmp4_ltu_imm(code, p1, p2, imm8, r3) ia64_cmp4_ltu_imm_pred ((code), 0, p1, p2, imm8, r3)
+#define ia64_cmp4_eq_imm(code, p1, p2, imm8, r3) ia64_cmp4_eq_imm_pred ((code), 0, p1, p2, imm8, r3)
+#define ia64_cmp4_lt_unc_imm(code, p1, p2, imm8, r3) ia64_cmp4_lt_unc_imm_pred ((code), 0, p1, p2, imm8, r3)
+#define ia64_cmp4_ltu_unc_imm(code, p1, p2, imm8, r3) ia64_cmp4_ltu_unc_imm_pred ((code), 0, p1, p2, imm8, r3)
+#define ia64_cmp4_eq_unc_imm(code, p1, p2, imm8, r3) ia64_cmp4_eq_unc_imm_pred ((code), 0, p1, p2, imm8, r3)
+#define ia64_cmp4_eq_and_imm(code, p1, p2, imm8, r3) ia64_cmp4_eq_and_imm_pred ((code), 0, p1, p2, imm8, r3)
+#define ia64_cmp4_eq_or_imm(code, p1, p2, imm8, r3) ia64_cmp4_eq_or_imm_pred ((code), 0, p1, p2, imm8, r3)
+#define ia64_cmp4_eq_or_andcm_imm(code, p1, p2, imm8, r3) ia64_cmp4_eq_or_andcm_imm_pred ((code), 0, p1, p2, imm8, r3)
+#define ia64_cmp4_ne_and_imm(code, p1, p2, imm8, r3) ia64_cmp4_ne_and_imm_pred ((code), 0, p1, p2, imm8, r3)
+#define ia64_cmp4_ne_or_imm(code, p1, p2, imm8, r3) ia64_cmp4_ne_or_imm_pred ((code), 0, p1, p2, imm8, r3)
+#define ia64_cmp4_ne_or_andcm_imm(code, p1, p2, imm8, r3) ia64_cmp4_ne_or_andcm_imm_pred ((code), 0, p1, p2, imm8, r3)
+
+/* Pseudo ops */
+#define ia64_cmp_ne_imm(code, p1, p2, imm8, r3) ia64_cmp_ne_imm_pred((code), 0, p1, p2, imm8, r3)
+#define ia64_cmp_le_imm(code, p1, p2, imm8, r3) ia64_cmp_le_imm_pred((code), 0, p1, p2, imm8, r3)
+#define ia64_cmp_gt_imm(code, p1, p2, imm8, r3) ia64_cmp_gt_imm_pred((code), 0, p1, p2, imm8, r3)
+#define ia64_cmp_ge_imm(code, p1, p2, imm8, r3) ia64_cmp_ge_imm_pred((code), 0, p1, p2, imm8, r3)
+#define ia64_cmp_leu_imm(code, p1, p2, imm8, r3) ia64_cmp_leu_imm_pred((code), 0, p1, p2, imm8, r3)
+#define ia64_cmp_gtu_imm(code, p1, p2, imm8, r3) ia64_cmp_gtu_imm_pred((code), 0, p1, p2, imm8, r3)
+#define ia64_cmp_geu_imm(code, p1, p2, imm8, r3) ia64_cmp_geu_imm_pred((code), 0, p1, p2, imm8, r3)
+
+#define ia64_cmp4_ne_imm(code, p1, p2, imm8, r3) ia64_cmp4_ne_imm_pred((code), 0, p1, p2, imm8, r3)
+#define ia64_cmp4_le_imm(code, p1, p2, imm8, r3) ia64_cmp4_le_imm_pred((code), 0, p1, p2, imm8, r3)
+#define ia64_cmp4_gt_imm(code, p1, p2, imm8, r3) ia64_cmp4_gt_imm_pred((code), 0, p1, p2, imm8, r3)
+#define ia64_cmp4_ge_imm(code, p1, p2, imm8, r3) ia64_cmp4_ge_imm_pred((code), 0, p1, p2, imm8, r3)
+#define ia64_cmp4_leu_imm(code, p1, p2, imm8, r3) ia64_cmp4_leu_imm_pred((code), 0, p1, p2, imm8, r3)
+#define ia64_cmp4_gtu_imm(code, p1, p2, imm8, r3) ia64_cmp4_gtu_imm_pred((code), 0, p1, p2, imm8, r3)
+#define ia64_cmp4_geu_imm(code, p1, p2, imm8, r3) ia64_cmp4_geu_imm_pred((code), 0, p1, p2, imm8, r3)
+
+#define ia64_padd1(code, r1,r2,r3) ia64_padd1_pred ((code), 0, r1,r2,r3)
+#define ia64_padd2(code, r1,r2,r3) ia64_padd2_pred ((code), 0, r1,r2,r3)
+#define ia64_padd4(code, r1,r2,r3) ia64_padd4_pred ((code), 0, r1,r2,r3)
+#define ia64_padd1_sss(code, r1,r2,r3) ia64_padd1_sss_pred ((code), 0, r1,r2,r3)
+#define ia64_padd2_sss(code, r1,r2,r3) ia64_padd2_sss_pred ((code), 0, r1,r2,r3)
+#define ia64_padd1_uuu(code, r1,r2,r3) ia64_padd1_uuu_pred ((code), 0, r1,r2,r3)
+#define ia64_padd2_uuu(code, r1,r2,r3) ia64_padd2_uuu_pred ((code), 0, r1,r2,r3)
+#define ia64_padd1_uus(code, r1,r2,r3) ia64_padd1_uus_pred ((code), 0, r1,r2,r3)
+#define ia64_padd2_uus(code, r1,r2,r3) ia64_padd2_uus_pred ((code), 0, r1,r2,r3)
+
+#define ia64_psub1(code, r1,r2,r3) ia64_psub1_pred ((code), 0, r1,r2,r3)
+#define ia64_psub2(code, r1,r2,r3) ia64_psub2_pred ((code), 0, r1,r2,r3)
+#define ia64_psub4(code, r1,r2,r3) ia64_psub4_pred ((code), 0, r1,r2,r3)
+#define ia64_psub1_sss(code, r1,r2,r3) ia64_psub1_sss_pred ((code), 0, r1,r2,r3)
+#define ia64_psub2_sss(code, r1,r2,r3) ia64_psub2_sss_pred ((code), 0, r1,r2,r3)
+#define ia64_psub1_uuu(code, r1,r2,r3) ia64_psub1_uuu_pred ((code), 0, r1,r2,r3)
+#define ia64_psub2_uuu(code, r1,r2,r3) ia64_psub2_uuu_pred ((code), 0, r1,r2,r3)
+#define ia64_psub1_uus(code, r1,r2,r3) ia64_psub1_uus_pred ((code), 0, r1,r2,r3)
+#define ia64_psub2_uus(code, r1,r2,r3) ia64_psub2_uus_pred ((code), 0, r1,r2,r3)
+
+#define ia64_pavg1(code, r1,r2,r3) ia64_pavg1_pred ((code), 0, r1,r2,r3)
+#define ia64_pavg2(code, r1,r2,r3) ia64_pavg2_pred ((code), 0, r1,r2,r3)
+#define ia64_pavg1_raz(code, r1,r2,r3) ia64_pavg1_raz_pred ((code), 0, r1,r2,r3)
+#define ia64_pavg2_raz(code, r1,r2,r3) ia64_pavg2_raz_pred ((code), 0, r1,r2,r3)
+#define ia64_pavgsub1(code, r1,r2,r3) ia64_pavgsub1_pred ((code), 0, r1,r2,r3)
+#define ia64_pavgsub2(code, r1,r2,r3) ia64_pavgsub2_pred ((code), 0, r1,r2,r3)
+#define ia64_pcmp1_eq(code, r1,r2,r3) ia64_pcmp1_eq_pred ((code), 0, r1,r2,r3)
+#define ia64_pcmp2_eq(code, r1,r2,r3) ia64_pcmp2_eq_pred ((code), 0, r1,r2,r3)
+#define ia64_pcmp4_eq(code, r1,r2,r3) ia64_pcmp4_eq_pred ((code), 0, r1,r2,r3)
+#define ia64_pcmp1_gt(code, r1,r2,r3) ia64_pcmp1_gt_pred ((code), 0, r1,r2,r3)
+#define ia64_pcmp2_gt(code, r1,r2,r3) ia64_pcmp2_gt_pred ((code), 0, r1,r2,r3)
+#define ia64_pcmp4_gt(code, r1,r2,r3) ia64_pcmp4_gt_pred ((code), 0, r1,r2,r3)
+
+
+#define ia64_pshladd2(code, r1, r2, r3, count) ia64_pshladd2_pred ((code), 0, r1, r2, r3, count)
+#define ia64_pshradd2(code, r1, r2, r3, count) ia64_pshradd2_pred ((code), 0, r1, r2, r3, count)
+
+#define ia64_pmpyshr2(code, r1, r2, r3, count) ia64_pmpyshr2_pred ((code), 0, r1, r2, r3, count)
+
+#define ia64_pmpyshr2_u(code, r1, r2, r3, count) ia64_pmpyshr2_u_pred ((code), 0, r1, r2, r3, count)
+
+
+#define ia64_pmpy2_r(code, r1, r2, r3) ia64_pmpy2_r_pred ((code), 0, r1, r2, r3)
+#define ia64_pmpy2_l(code, r1, r2, r3) ia64_pmpy2_l_pred ((code), 0, r1, r2, r3)
+#define ia64_mix1_r(code, r1, r2, r3) ia64_mix1_r_pred ((code), 0, r1, r2, r3)
+#define ia64_mix2_r(code, r1, r2, r3) ia64_mix2_r_pred ((code), 0, r1, r2, r3)
+#define ia64_mix4_r(code, r1, r2, r3) ia64_mix4_r_pred ((code), 0, r1, r2, r3)
+#define ia64_mix1_l(code, r1, r2, r3) ia64_mix1_l_pred ((code), 0, r1, r2, r3)
+#define ia64_mix2_l(code, r1, r2, r3) ia64_mix2_l_pred ((code), 0, r1, r2, r3)
+#define ia64_mix4_l(code, r1, r2, r3) ia64_mix4_l_pred ((code), 0, r1, r2, r3)
+#define ia64_pack2_uss(code, r1, r2, r3) ia64_pack2_uss_pred ((code), 0, r1, r2, r3)
+#define ia64_pack2_sss(code, r1, r2, r3) ia64_pack2_sss_pred ((code), 0, r1, r2, r3)
+#define ia64_pack4_sss(code, r1, r2, r3) ia64_pack4_sss_pred ((code), 0, r1, r2, r3)
+#define ia64_unpack1_h(code, r1, r2, r3) ia64_unpack1_h_pred ((code), 0, r1, r2, r3)
+#define ia64_unpack2_h(code, r1, r2, r3) ia64_unpack2_h_pred ((code), 0, r1, r2, r3)
+#define ia64_unpack4_h(code, r1, r2, r3) ia64_unpack4_h_pred ((code), 0, r1, r2, r3)
+#define ia64_unpack1_l(code, r1, r2, r3) ia64_unpack1_l_pred ((code), 0, r1, r2, r3)
+#define ia64_unpack2_l(code, r1, r2, r3) ia64_unpack2_l_pred ((code), 0, r1, r2, r3)
+#define ia64_unpack4_l(code, r1, r2, r3) ia64_unpack4_l_pred ((code), 0, r1, r2, r3)
+#define ia64_pmin1_u(code, r1, r2, r3) ia64_pmin1_u_pred ((code), 0, r1, r2, r3)
+#define ia64_pmax1_u(code, r1, r2, r3) ia64_pmax1_u_pred ((code), 0, r1, r2, r3)
+#define ia64_pmin2(code, r1, r2, r3) ia64_pmin2_pred ((code), 0, r1, r2, r3)
+#define ia64_pmax2(code, r1, r2, r3) ia64_pmax2_pred ((code), 0, r1, r2, r3)
+#define ia64_psad1(code, r1, r2, r3) ia64_psad1_pred ((code), 0, r1, r2, r3)
+
+#define ia64_mux1(code, r1, r2, mbtype) ia64_mux1_pred ((code), 0, r1, r2, mbtype)
+
+
+#define ia64_mux2(code, r1, r2, mhtype) ia64_mux2_pred ((code), 0, r1, r2, mhtype)
+
+
+#define ia64_pshr2(code, r1, r3, r2) ia64_pshr2_pred ((code), 0, r1, r3, r2)
+#define ia64_pshr4(code, r1, r3, r2) ia64_pshr4_pred ((code), 0, r1, r3, r2)
+#define ia64_shr(code, r1, r3, r2) ia64_shr_pred ((code), 0, r1, r3, r2)
+#define ia64_pshr2_u(code, r1, r3, r2) ia64_pshr2_u_pred ((code), 0, r1, r3, r2)
+#define ia64_pshr4_u(code, r1, r3, r2) ia64_pshr4_u_pred ((code), 0, r1, r3, r2)
+#define ia64_shr_u(code, r1, r3, r2) ia64_shr_u_pred ((code), 0, r1, r3, r2)
+
+
+#define ia64_pshr2_imm(code, r1, r3, count) ia64_pshr2_imm_pred ((code), 0, r1, r3, count)
+#define ia64_pshr4_imm(code, r1, r3, count) ia64_pshr4_imm_pred ((code), 0, r1, r3, count)
+#define ia64_pshr2_u_imm(code, r1, r3, count) ia64_pshr2_u_imm_pred ((code), 0, r1, r3, count)
+#define ia64_pshr4_u_imm(code, r1, r3, count) ia64_pshr4_u_imm_pred ((code), 0, r1, r3, count)
+
+
+#define ia64_pshl2(code, r1, r3, r2) ia64_pshl2_pred ((code), 0, r1, r3, r2)
+#define ia64_pshl4(code, r1, r3, r2) ia64_pshl4_pred ((code), 0, r1, r3, r2)
+#define ia64_shl(code, r1, r3, r2) ia64_shl_pred ((code), 0, r1, r3, r2)
+
+#define ia64_shl_imm(code, r1, r3, count) ia64_dep_z ((code), (r1), (r3), (count), 64 - (count))
+#define ia64_shr_imm(code, r1, r3, count) ia64_extr ((code), (r1), (r3), (count), 64 - (count))
+#define ia64_shr_u_imm(code, r1, r3, count) ia64_extr_u ((code), (r1), (r3), (count), 64 - (count))
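+/* Worked example: IA-64 has no shift-by-immediate instruction form, so the
+   deposit/extract unit is used instead -- shl r4 = r5, 3 becomes
+   dep.z r4 = r5, 3, 61 and shr.u r4 = r5, 3 becomes extr.u r4 = r5, 3, 61:
+
+     ia64_shl_imm (code, 4, 5, 3);     // dep.z r4 = r5, 3, 61
+     ia64_shr_u_imm (code, 4, 5, 3);   // extr.u r4 = r5, 3, 61
+*/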
+
+#define ia64_pshl2_imm(code, r1, r2, count) ia64_pshl2_imm_pred ((code), 0, r1, r2, count)
+#define ia64_pshl4_imm(code, r1, r2, count) ia64_pshl4_imm_pred ((code), 0, r1, r2, count)
+
+
+#define ia64_popcnt(code, r1, r3) ia64_popcnt_pred ((code), 0, r1, r3)
+
+
+#define ia64_shrp(code, r1, r2, r3, count) ia64_shrp_pred ((code), 0, r1, r2, r3, count)
+
+
+#define ia64_extr_u(code, r1, r3, pos, len) ia64_extr_u_pred ((code), 0, r1, r3, pos, len)
+#define ia64_extr(code, r1, r3, pos, len) ia64_extr_pred ((code), 0, r1, r3, pos, len)
+
+
+#define ia64_dep_z(code, r1, r2, pos, len) ia64_dep_z_pred ((code), 0, r1, r2, pos, len)
+
+
+#define ia64_dep_z_imm(code, r1, imm, pos, len) ia64_dep_z_imm_pred ((code), 0, r1, imm, pos, len)
+
+
+#define ia64_dep_imm(code, r1, imm, r3, pos, len) ia64_dep_imm_pred ((code), 0, r1, imm, r3, pos, len)
+
+
+#define ia64_dep(code, r1, r2, r3, pos, len) ia64_dep_pred ((code), 0, r1, r2, r3, pos, len)
+
+
+#define ia64_tbit_z(code, p1, p2, r3, pos) ia64_tbit_z_pred ((code), 0, p1, p2, r3, pos)
+#define ia64_tbit_z_unc(code, p1, p2, r3, pos) ia64_tbit_z_unc_pred ((code), 0, p1, p2, r3, pos)
+#define ia64_tbit_z_and(code, p1, p2, r3, pos) ia64_tbit_z_and_pred ((code), 0, p1, p2, r3, pos)
+#define ia64_tbit_nz_and(code, p1, p2, r3, pos) ia64_tbit_nz_and_pred ((code), 0, p1, p2, r3, pos)
+#define ia64_tbit_z_or(code, p1, p2, r3, pos) ia64_tbit_z_or_pred ((code), 0, p1, p2, r3, pos)
+#define ia64_tbit_nz_or(code, p1, p2, r3, pos) ia64_tbit_nz_or_pred ((code), 0, p1, p2, r3, pos)
+#define ia64_tbit_z_or_andcm(code, p1, p2, r3, pos) ia64_tbit_z_or_andcm_pred ((code), 0, p1, p2, r3, pos)
+#define ia64_tbit_nz_or_andcm(code, p1, p2, r3, pos) ia64_tbit_nz_or_andcm_pred ((code), 0, p1, p2, r3, pos)
+
+
+#define ia64_tnat_z(code, p1, p2, r3) ia64_tnat_z_pred ((code), 0, p1, p2, r3)
+#define ia64_tnat_z_unc(code, p1, p2, r3) ia64_tnat_z_unc_pred ((code), 0, p1, p2, r3)
+#define ia64_tnat_z_and(code, p1, p2, r3) ia64_tnat_z_and_pred ((code), 0, p1, p2, r3)
+#define ia64_tnat_nz_and(code, p1, p2, r3) ia64_tnat_nz_and_pred ((code), 0, p1, p2, r3)
+#define ia64_tnat_z_or(code, p1, p2, r3) ia64_tnat_z_or_pred ((code), 0, p1, p2, r3)
+#define ia64_tnat_nz_or(code, p1, p2, r3) ia64_tnat_nz_or_pred ((code), 0, p1, p2, r3)
+#define ia64_tnat_z_or_andcm(code, p1, p2, r3) ia64_tnat_z_or_andcm_pred ((code), 0, p1, p2, r3)
+#define ia64_tnat_nz_or_andcm(code, p1, p2, r3) ia64_tnat_nz_or_andcm_pred ((code), 0, p1, p2, r3)
+
+
+#define ia64_nop_i(code, imm) ia64_nop_i_pred ((code), 0, imm)
+#define ia64_hint_i(code, imm) ia64_hint_i_pred ((code), 0, imm)
+
+
+#define ia64_break_i(code, imm) ia64_break_i_pred ((code), 0, imm)
+
+
+#define ia64_chk_s_i(code, r2,disp) ia64_chk_s_i_pred ((code), 0, r2,disp)
+
+#define ia64_mov_to_br_hint(code, b1, r2, disp, wh, ih) ia64_mov_to_br_hint_pred ((code), 0, b1, r2, disp, wh, ih)
+#define ia64_mov_ret_to_br_hint(code, b1, r2, disp, wh, ih) ia64_mov_ret_to_br_hint_pred ((code), 0, b1, r2, disp, wh, ih)
+
+/* Pseudo ops */
+
+#define ia64_mov_to_br(code, b1, r2) ia64_mov_to_br_pred ((code), 0, (b1), (r2))
+#define ia64_mov_ret_to_br(code, b1, r2) ia64_mov_ret_to_br_pred ((code), 0, (b1), (r2))
+
+/* End of pseudo ops */
+
+#define ia64_mov_from_br(code, r1, b2) ia64_mov_from_br_pred ((code), 0, r1, b2)
+
+
+#define ia64_mov_to_pred(code, r2, mask) ia64_mov_to_pred_pred ((code), 0, r2, mask)
+
+
+#define ia64_mov_to_pred_rot_imm(code, imm) ia64_mov_to_pred_rot_imm_pred ((code), 0, imm)
+
+
+#define ia64_mov_from_ip(code, r1) ia64_mov_from_ip_pred ((code), 0, r1)
+#define ia64_mov_from_pred(code, r1) ia64_mov_from_pred_pred ((code), 0, r1)
+
+
+#define ia64_mov_to_ar_i(code, ar3, r2) ia64_mov_to_ar_i_pred ((code), 0, ar3, r2)
+
+
+#define ia64_mov_to_ar_imm_i(code, ar3, imm) ia64_mov_to_ar_imm_i_pred ((code), 0, ar3, imm)
+
+
+#define ia64_mov_from_ar_i(code, r1, ar3) ia64_mov_from_ar_i_pred ((code), 0, r1, ar3)
+
+
+#define ia64_zxt1(code, r1, r3) ia64_zxt1_pred ((code), 0, r1, r3)
+#define ia64_zxt2(code, r1, r3) ia64_zxt2_pred ((code), 0, r1, r3)
+#define ia64_zxt4(code, r1, r3) ia64_zxt4_pred ((code), 0, r1, r3)
+#define ia64_sxt1(code, r1, r3) ia64_sxt1_pred ((code), 0, r1, r3)
+#define ia64_sxt2(code, r1, r3) ia64_sxt2_pred ((code), 0, r1, r3)
+#define ia64_sxt4(code, r1, r3) ia64_sxt4_pred ((code), 0, r1, r3)
+#define ia64_czx1_l(code, r1, r3) ia64_czx1_l_pred ((code), 0, r1, r3)
+#define ia64_czx2_l(code, r1, r3) ia64_czx2_l_pred ((code), 0, r1, r3)
+#define ia64_czx1_r(code, r1, r3) ia64_czx1_r_pred ((code), 0, r1, r3)
+#define ia64_czx2_r(code, r1, r3) ia64_czx2_r_pred ((code), 0, r1, r3)
+
+#define ia64_ld1_hint(code, r1, r3, hint) ia64_ld1_hint_pred ((code), 0, r1, r3, hint)
+#define ia64_ld2_hint(code, r1, r3, hint) ia64_ld2_hint_pred ((code), 0, r1, r3, hint)
+#define ia64_ld4_hint(code, r1, r3, hint) ia64_ld4_hint_pred ((code), 0, r1, r3, hint)
+#define ia64_ld8_hint(code, r1, r3, hint) ia64_ld8_hint_pred ((code), 0, r1, r3, hint)
+
+#define ia64_ld1_s_hint(code, r1, r3, hint) ia64_ld1_s_hint_pred ((code), 0, r1, r3, hint)
+#define ia64_ld2_s_hint(code, r1, r3, hint) ia64_ld2_s_hint_pred ((code), 0, r1, r3, hint)
+#define ia64_ld4_s_hint(code, r1, r3, hint) ia64_ld4_s_hint_pred ((code), 0, r1, r3, hint)
+#define ia64_ld8_s_hint(code, r1, r3, hint) ia64_ld8_s_hint_pred ((code), 0, r1, r3, hint)
+
+#define ia64_ld1_a_hint(code, r1, r3, hint) ia64_ld1_a_hint_pred ((code), 0, r1, r3, hint)
+#define ia64_ld2_a_hint(code, r1, r3, hint) ia64_ld2_a_hint_pred ((code), 0, r1, r3, hint)
+#define ia64_ld4_a_hint(code, r1, r3, hint) ia64_ld4_a_hint_pred ((code), 0, r1, r3, hint)
+#define ia64_ld8_a_hint(code, r1, r3, hint) ia64_ld8_a_hint_pred ((code), 0, r1, r3, hint)
+
+#define ia64_ld1_sa_hint(code, r1, r3, hint) ia64_ld1_sa_hint_pred ((code), 0, r1, r3, hint)
+#define ia64_ld2_sa_hint(code, r1, r3, hint) ia64_ld2_sa_hint_pred ((code), 0, r1, r3, hint)
+#define ia64_ld4_sa_hint(code, r1, r3, hint) ia64_ld4_sa_hint_pred ((code), 0, r1, r3, hint)
+#define ia64_ld8_sa_hint(code, r1, r3, hint) ia64_ld8_sa_hint_pred ((code), 0, r1, r3, hint)
+
+#define ia64_ld1_bias_hint(code, r1, r3, hint) ia64_ld1_bias_hint_pred ((code), 0, r1, r3, hint)
+#define ia64_ld2_bias_hint(code, r1, r3, hint) ia64_ld2_bias_hint_pred ((code), 0, r1, r3, hint)
+#define ia64_ld4_bias_hint(code, r1, r3, hint) ia64_ld4_bias_hint_pred ((code), 0, r1, r3, hint)
+#define ia64_ld8_bias_hint(code, r1, r3, hint) ia64_ld8_bias_hint_pred ((code), 0, r1, r3, hint)
+
+#define ia64_ld1_acq_hint(code, r1, r3, hint) ia64_ld1_acq_hint_pred ((code), 0, r1, r3, hint)
+#define ia64_ld2_acq_hint(code, r1, r3, hint) ia64_ld2_acq_hint_pred ((code), 0, r1, r3, hint)
+#define ia64_ld4_acq_hint(code, r1, r3, hint) ia64_ld4_acq_hint_pred ((code), 0, r1, r3, hint)
+#define ia64_ld8_acq_hint(code, r1, r3, hint) ia64_ld8_acq_hint_pred ((code), 0, r1, r3, hint)
+
+#define ia64_ld8_fill_hint(code, r1, r3, hint) ia64_ld8_fill_hint_pred ((code), 0, r1, r3, hint)
+
+#define ia64_ld1_c_clr_hint(code, r1, r3, hint) ia64_ld1_c_clr_hint_pred ((code), 0, r1, r3, hint)
+#define ia64_ld2_c_clr_hint(code, r1, r3, hint) ia64_ld2_c_clr_hint_pred ((code), 0, r1, r3, hint)
+#define ia64_ld4_c_clr_hint(code, r1, r3, hint) ia64_ld4_c_clr_hint_pred ((code), 0, r1, r3, hint)
+#define ia64_ld8_c_clr_hint(code, r1, r3, hint) ia64_ld8_c_clr_hint_pred ((code), 0, r1, r3, hint)
+
+#define ia64_ld1_c_nc_hint(code, r1, r3, hint) ia64_ld1_c_nc_hint_pred ((code), 0, r1, r3, hint)
+#define ia64_ld2_c_nc_hint(code, r1, r3, hint) ia64_ld2_c_nc_hint_pred ((code), 0, r1, r3, hint)
+#define ia64_ld4_c_nc_hint(code, r1, r3, hint) ia64_ld4_c_nc_hint_pred ((code), 0, r1, r3, hint)
+#define ia64_ld8_c_nc_hint(code, r1, r3, hint) ia64_ld8_c_nc_hint_pred ((code), 0, r1, r3, hint)
+
+#define ia64_ld1_c_clr_acq_hint(code, r1, r3, hint) ia64_ld1_c_clr_acq_hint_pred ((code), 0, r1, r3, hint)
+#define ia64_ld2_c_clr_acq_hint(code, r1, r3, hint) ia64_ld2_c_clr_acq_hint_pred ((code), 0, r1, r3, hint)
+#define ia64_ld4_c_clr_acq_hint(code, r1, r3, hint) ia64_ld4_c_clr_acq_hint_pred ((code), 0, r1, r3, hint)
+#define ia64_ld8_c_clr_acq_hint(code, r1, r3, hint) ia64_ld8_c_clr_acq_hint_pred ((code), 0, r1, r3, hint)
+
+#define ia64_ld16_hint(code, r1, r3, hint) ia64_ld16_hint_pred ((code), 0, r1, r3, hint)
+#define ia64_ld16_acq_hint(code, r1, r3, hint) ia64_ld16_acq_hint_pred ((code), 0, r1, r3, hint)
+
+
+#define ia64_ld1_inc_hint(code, r1, r2, r3, hint) ia64_ld1_inc_hint_pred ((code), 0, r1, r2, r3, hint)
+#define ia64_ld2_inc_hint(code, r1, r2, r3, hint) ia64_ld2_inc_hint_pred ((code), 0, r1, r2, r3, hint)
+#define ia64_ld4_inc_hint(code, r1, r2, r3, hint) ia64_ld4_inc_hint_pred ((code), 0, r1, r2, r3, hint)
+#define ia64_ld8_inc_hint(code, r1, r2, r3, hint) ia64_ld8_inc_hint_pred ((code), 0, r1, r2, r3, hint)
+
+#define ia64_ld1_s_inc_hint(code, r1, r2, r3, hint) ia64_ld1_s_inc_hint_pred ((code), 0, r1, r2, r3, hint)
+#define ia64_ld2_s_inc_hint(code, r1, r2, r3, hint) ia64_ld2_s_inc_hint_pred ((code), 0, r1, r2, r3, hint)
+#define ia64_ld4_s_inc_hint(code, r1, r2, r3, hint) ia64_ld4_s_inc_hint_pred ((code), 0, r1, r2, r3, hint)
+#define ia64_ld8_s_inc_hint(code, r1, r2, r3, hint) ia64_ld8_s_inc_hint_pred ((code), 0, r1, r2, r3, hint)
+
+#define ia64_ld1_a_inc_hint(code, r1, r2, r3, hint) ia64_ld1_a_inc_hint_pred ((code), 0, r1, r2, r3, hint)
+#define ia64_ld2_a_inc_hint(code, r1, r2, r3, hint) ia64_ld2_a_inc_hint_pred ((code), 0, r1, r2, r3, hint)
+#define ia64_ld4_a_inc_hint(code, r1, r2, r3, hint) ia64_ld4_a_inc_hint_pred ((code), 0, r1, r2, r3, hint)
+#define ia64_ld8_a_inc_hint(code, r1, r2, r3, hint) ia64_ld8_a_inc_hint_pred ((code), 0, r1, r2, r3, hint)
+
+#define ia64_ld1_sa_inc_hint(code, r1, r2, r3, hint) ia64_ld1_sa_inc_hint_pred ((code), 0, r1, r2, r3, hint)
+#define ia64_ld2_sa_inc_hint(code, r1, r2, r3, hint) ia64_ld2_sa_inc_hint_pred ((code), 0, r1, r2, r3, hint)
+#define ia64_ld4_sa_inc_hint(code, r1, r2, r3, hint) ia64_ld4_sa_inc_hint_pred ((code), 0, r1, r2, r3, hint)
+#define ia64_ld8_sa_inc_hint(code, r1, r2, r3, hint) ia64_ld8_sa_inc_hint_pred ((code), 0, r1, r2, r3, hint)
+
+#define ia64_ld1_bias_inc_hint(code, r1, r2, r3, hint) ia64_ld1_bias_inc_hint_pred ((code), 0, r1, r2, r3, hint)
+#define ia64_ld2_bias_inc_hint(code, r1, r2, r3, hint) ia64_ld2_bias_inc_hint_pred ((code), 0, r1, r2, r3, hint)
+#define ia64_ld4_bias_inc_hint(code, r1, r2, r3, hint) ia64_ld4_bias_inc_hint_pred ((code), 0, r1, r2, r3, hint)
+#define ia64_ld8_bias_inc_hint(code, r1, r2, r3, hint) ia64_ld8_bias_inc_hint_pred ((code), 0, r1, r2, r3, hint)
+
+#define ia64_ld1_acq_inc_hint(code, r1, r2, r3, hint) ia64_ld1_acq_inc_hint_pred ((code), 0, r1, r2, r3, hint)
+#define ia64_ld2_acq_inc_hint(code, r1, r2, r3, hint) ia64_ld2_acq_inc_hint_pred ((code), 0, r1, r2, r3, hint)
+#define ia64_ld4_acq_inc_hint(code, r1, r2, r3, hint) ia64_ld4_acq_inc_hint_pred ((code), 0, r1, r2, r3, hint)
+#define ia64_ld8_acq_inc_hint(code, r1, r2, r3, hint) ia64_ld8_acq_inc_hint_pred ((code), 0, r1, r2, r3, hint)
+
+#define ia64_ld8_fill_inc_hint(code, r1, r2, r3, hint) ia64_ld8_fill_inc_hint_pred ((code), 0, r1, r2, r3, hint)
+
+#define ia64_ld1_c_clr_inc_hint(code, r1, r2, r3, hint) ia64_ld1_c_clr_inc_hint_pred ((code), 0, r1, r2, r3, hint)
+#define ia64_ld2_c_clr_inc_hint(code, r1, r2, r3, hint) ia64_ld2_c_clr_inc_hint_pred ((code), 0, r1, r2, r3, hint)
+#define ia64_ld4_c_clr_inc_hint(code, r1, r2, r3, hint) ia64_ld4_c_clr_inc_hint_pred ((code), 0, r1, r2, r3, hint)
+#define ia64_ld8_c_clr_inc_hint(code, r1, r2, r3, hint) ia64_ld8_c_clr_inc_hint_pred ((code), 0, r1, r2, r3, hint)
+
+#define ia64_ld1_c_nc_inc_hint(code, r1, r2, r3, hint) ia64_ld1_c_nc_inc_hint_pred ((code), 0, r1, r2, r3, hint)
+#define ia64_ld2_c_nc_inc_hint(code, r1, r2, r3, hint) ia64_ld2_c_nc_inc_hint_pred ((code), 0, r1, r2, r3, hint)
+#define ia64_ld4_c_nc_inc_hint(code, r1, r2, r3, hint) ia64_ld4_c_nc_inc_hint_pred ((code), 0, r1, r2, r3, hint)
+#define ia64_ld8_c_nc_inc_hint(code, r1, r2, r3, hint) ia64_ld8_c_nc_inc_hint_pred ((code), 0, r1, r2, r3, hint)
+
+#define ia64_ld1_c_clr_acq_inc_hint(code, r1, r2, r3, hint) ia64_ld1_c_clr_acq_inc_hint_pred ((code), 0, r1, r2, r3, hint)
+#define ia64_ld2_c_clr_acq_inc_hint(code, r1, r2, r3, hint) ia64_ld2_c_clr_acq_inc_hint_pred ((code), 0, r1, r2, r3, hint)
+#define ia64_ld4_c_clr_acq_inc_hint(code, r1, r2, r3, hint) ia64_ld4_c_clr_acq_inc_hint_pred ((code), 0, r1, r2, r3, hint)
+#define ia64_ld8_c_clr_acq_inc_hint(code, r1, r2, r3, hint) ia64_ld8_c_clr_acq_inc_hint_pred ((code), 0, r1, r2, r3, hint)
+
+
+#define ia64_ld1_inc_imm_hint(code, r1, r3, imm, hint) ia64_ld1_inc_imm_hint_pred ((code), 0, r1, r3, imm, hint)
+#define ia64_ld2_inc_imm_hint(code, r1, r3, imm, hint) ia64_ld2_inc_imm_hint_pred ((code), 0, r1, r3, imm, hint)
+#define ia64_ld4_inc_imm_hint(code, r1, r3, imm, hint) ia64_ld4_inc_imm_hint_pred ((code), 0, r1, r3, imm, hint)
+#define ia64_ld8_inc_imm_hint(code, r1, r3, imm, hint) ia64_ld8_inc_imm_hint_pred ((code), 0, r1, r3, imm, hint)
+
+#define ia64_ld1_s_inc_imm_hint(code, r1, r3, imm, hint) ia64_ld1_s_inc_imm_hint_pred ((code), 0, r1, r3, imm, hint)
+#define ia64_ld2_s_inc_imm_hint(code, r1, r3, imm, hint) ia64_ld2_s_inc_imm_hint_pred ((code), 0, r1, r3, imm, hint)
+#define ia64_ld4_s_inc_imm_hint(code, r1, r3, imm, hint) ia64_ld4_s_inc_imm_hint_pred ((code), 0, r1, r3, imm, hint)
+#define ia64_ld8_s_inc_imm_hint(code, r1, r3, imm, hint) ia64_ld8_s_inc_imm_hint_pred ((code), 0, r1, r3, imm, hint)
+
+#define ia64_ld1_a_inc_imm_hint(code, r1, r3, imm, hint) ia64_ld1_a_inc_imm_hint_pred ((code), 0, r1, r3, imm, hint)
+#define ia64_ld2_a_inc_imm_hint(code, r1, r3, imm, hint) ia64_ld2_a_inc_imm_hint_pred ((code), 0, r1, r3, imm, hint)
+#define ia64_ld4_a_inc_imm_hint(code, r1, r3, imm, hint) ia64_ld4_a_inc_imm_hint_pred ((code), 0, r1, r3, imm, hint)
+#define ia64_ld8_a_inc_imm_hint(code, r1, r3, imm, hint) ia64_ld8_a_inc_imm_hint_pred ((code), 0, r1, r3, imm, hint)
+
+#define ia64_ld1_sa_inc_imm_hint(code, r1, r3, imm, hint) ia64_ld1_sa_inc_imm_hint_pred ((code), 0, r1, r3, imm, hint)
+#define ia64_ld2_sa_inc_imm_hint(code, r1, r3, imm, hint) ia64_ld2_sa_inc_imm_hint_pred ((code), 0, r1, r3, imm, hint)
+#define ia64_ld4_sa_inc_imm_hint(code, r1, r3, imm, hint) ia64_ld4_sa_inc_imm_hint_pred ((code), 0, r1, r3, imm, hint)
+#define ia64_ld8_sa_inc_imm_hint(code, r1, r3, imm, hint) ia64_ld8_sa_inc_imm_hint_pred ((code), 0, r1, r3, imm, hint)
+
+#define ia64_ld1_bias_inc_imm_hint(code, r1, r3, imm, hint) ia64_ld1_bias_inc_imm_hint_pred ((code), 0, r1, r3, imm, hint)
+#define ia64_ld2_bias_inc_imm_hint(code, r1, r3, imm, hint) ia64_ld2_bias_inc_imm_hint_pred ((code), 0, r1, r3, imm, hint)
+#define ia64_ld4_bias_inc_imm_hint(code, r1, r3, imm, hint) ia64_ld4_bias_inc_imm_hint_pred ((code), 0, r1, r3, imm, hint)
+#define ia64_ld8_bias_inc_imm_hint(code, r1, r3, imm, hint) ia64_ld8_bias_inc_imm_hint_pred ((code), 0, r1, r3, imm, hint)
+
+#define ia64_ld1_acq_inc_imm_hint(code, r1, r3, imm, hint) ia64_ld1_acq_inc_imm_hint_pred ((code), 0, r1, r3, imm, hint)
+#define ia64_ld2_acq_inc_imm_hint(code, r1, r3, imm, hint) ia64_ld2_acq_inc_imm_hint_pred ((code), 0, r1, r3, imm, hint)
+#define ia64_ld4_acq_inc_imm_hint(code, r1, r3, imm, hint) ia64_ld4_acq_inc_imm_hint_pred ((code), 0, r1, r3, imm, hint)
+#define ia64_ld8_acq_inc_imm_hint(code, r1, r3, imm, hint) ia64_ld8_acq_inc_imm_hint_pred ((code), 0, r1, r3, imm, hint)
+
+#define ia64_ld8_fill_inc_imm_hint(code, r1, r3, imm, hint) ia64_ld8_fill_inc_imm_hint_pred ((code), 0, r1, r3, imm, hint)
+
+#define ia64_ld1_c_clr_inc_imm_hint(code, r1, r3, imm, hint) ia64_ld1_c_clr_inc_imm_hint_pred ((code), 0, r1, r3, imm, hint)
+#define ia64_ld2_c_clr_inc_imm_hint(code, r1, r3, imm, hint) ia64_ld2_c_clr_inc_imm_hint_pred ((code), 0, r1, r3, imm, hint)
+#define ia64_ld4_c_clr_inc_imm_hint(code, r1, r3, imm, hint) ia64_ld4_c_clr_inc_imm_hint_pred ((code), 0, r1, r3, imm, hint)
+#define ia64_ld8_c_clr_inc_imm_hint(code, r1, r3, imm, hint) ia64_ld8_c_clr_inc_imm_hint_pred ((code), 0, r1, r3, imm, hint)
+
+#define ia64_ld1_c_nc_inc_imm_hint(code, r1, r3, imm, hint) ia64_ld1_c_nc_inc_imm_hint_pred ((code), 0, r1, r3, imm, hint)
+#define ia64_ld2_c_nc_inc_imm_hint(code, r1, r3, imm, hint) ia64_ld2_c_nc_inc_imm_hint_pred ((code), 0, r1, r3, imm, hint)
+#define ia64_ld4_c_nc_inc_imm_hint(code, r1, r3, imm, hint) ia64_ld4_c_nc_inc_imm_hint_pred ((code), 0, r1, r3, imm, hint)
+#define ia64_ld8_c_nc_inc_imm_hint(code, r1, r3, imm, hint) ia64_ld8_c_nc_inc_imm_hint_pred ((code), 0, r1, r3, imm, hint)
+
+#define ia64_ld1_c_clr_acq_inc_imm_hint(code, r1, r3, imm, hint) ia64_ld1_c_clr_acq_inc_imm_hint_pred ((code), 0, r1, r3, imm, hint)
+#define ia64_ld2_c_clr_acq_inc_imm_hint(code, r1, r3, imm, hint) ia64_ld2_c_clr_acq_inc_imm_hint_pred ((code), 0, r1, r3, imm, hint)
+#define ia64_ld4_c_clr_acq_inc_imm_hint(code, r1, r3, imm, hint) ia64_ld4_c_clr_acq_inc_imm_hint_pred ((code), 0, r1, r3, imm, hint)
+#define ia64_ld8_c_clr_acq_inc_imm_hint(code, r1, r3, imm, hint) ia64_ld8_c_clr_acq_inc_imm_hint_pred ((code), 0, r1, r3, imm, hint)
+
+/* Pseudo ops */
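+/* These default the locality hint to 0, i.e. the plain form of each load
+   with no .nt1/.nta completer. */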
+
+#define ia64_ld1(code, r1, r3) ia64_ld1_hint_pred (code, 0, r1, r3, 0)
+#define ia64_ld2(code, r1, r3) ia64_ld2_hint_pred (code, 0, r1, r3, 0)
+#define ia64_ld4(code, r1, r3) ia64_ld4_hint_pred (code, 0, r1, r3, 0)
+#define ia64_ld8(code, r1, r3) ia64_ld8_hint_pred (code, 0, r1, r3, 0)
+
+#define ia64_ld1_s(code, r1, r3) ia64_ld1_s_hint_pred (code, 0, r1, r3, 0)
+#define ia64_ld2_s(code, r1, r3) ia64_ld2_s_hint_pred (code, 0, r1, r3, 0)
+#define ia64_ld4_s(code, r1, r3) ia64_ld4_s_hint_pred (code, 0, r1, r3, 0)
+#define ia64_ld8_s(code, r1, r3) ia64_ld8_s_hint_pred (code, 0, r1, r3, 0)
+
+#define ia64_ld1_a(code, r1, r3) ia64_ld1_a_hint_pred (code, 0, r1, r3, 0)
+#define ia64_ld2_a(code, r1, r3) ia64_ld2_a_hint_pred (code, 0, r1, r3, 0)
+#define ia64_ld4_a(code, r1, r3) ia64_ld4_a_hint_pred (code, 0, r1, r3, 0)
+#define ia64_ld8_a(code, r1, r3) ia64_ld8_a_hint_pred (code, 0, r1, r3, 0)
+
+#define ia64_ld1_sa(code, r1, r3) ia64_ld1_sa_hint_pred (code, 0, r1, r3, 0)
+#define ia64_ld2_sa(code, r1, r3) ia64_ld2_sa_hint_pred (code, 0, r1, r3, 0)
+#define ia64_ld4_sa(code, r1, r3) ia64_ld4_sa_hint_pred (code, 0, r1, r3, 0)
+#define ia64_ld8_sa(code, r1, r3) ia64_ld8_sa_hint_pred (code, 0, r1, r3, 0)
+
+#define ia64_ld1_bias(code, r1, r3) ia64_ld1_bias_hint_pred (code, 0, r1, r3, 0)
+#define ia64_ld2_bias(code, r1, r3) ia64_ld2_bias_hint_pred (code, 0, r1, r3, 0)
+#define ia64_ld4_bias(code, r1, r3) ia64_ld4_bias_hint_pred (code, 0, r1, r3, 0)
+#define ia64_ld8_bias(code, r1, r3) ia64_ld8_bias_hint_pred (code, 0, r1, r3, 0)
+
+#define ia64_ld1_acq(code, r1, r3) ia64_ld1_acq_hint_pred (code, 0, r1, r3, 0)
+#define ia64_ld2_acq(code, r1, r3) ia64_ld2_acq_hint_pred (code, 0, r1, r3, 0)
+#define ia64_ld4_acq(code, r1, r3) ia64_ld4_acq_hint_pred (code, 0, r1, r3, 0)
+#define ia64_ld8_acq(code, r1, r3) ia64_ld8_acq_hint_pred (code, 0, r1, r3, 0)
+
+#define ia64_ld8_fill(code, r1, r3) ia64_ld8_fill_hint_pred (code, 0, r1, r3, 0)
+
+#define ia64_ld1_c_clr(code, r1, r3) ia64_ld1_c_clr_hint_pred (code, 0, r1, r3, 0)
+#define ia64_ld2_c_clr(code, r1, r3) ia64_ld2_c_clr_hint_pred (code, 0, r1, r3, 0)
+#define ia64_ld4_c_clr(code, r1, r3) ia64_ld4_c_clr_hint_pred (code, 0, r1, r3, 0)
+#define ia64_ld8_c_clr(code, r1, r3) ia64_ld8_c_clr_hint_pred (code, 0, r1, r3, 0)
+
+#define ia64_ld1_c_nc(code, r1, r3) ia64_ld1_c_nc_hint_pred (code, 0, r1, r3, 0)
+#define ia64_ld2_c_nc(code, r1, r3) ia64_ld2_c_nc_hint_pred (code, 0, r1, r3, 0)
+#define ia64_ld4_c_nc(code, r1, r3) ia64_ld4_c_nc_hint_pred (code, 0, r1, r3, 0)
+#define ia64_ld8_c_nc(code, r1, r3) ia64_ld8_c_nc_hint_pred (code, 0, r1, r3, 0)
+
+#define ia64_ld1_c_clr_acq(code, r1, r3) ia64_ld1_c_clr_acq_hint_pred (code, 0, r1, r3, 0)
+#define ia64_ld2_c_clr_acq(code, r1, r3) ia64_ld2_c_clr_acq_hint_pred (code, 0, r1, r3, 0)
+#define ia64_ld4_c_clr_acq(code, r1, r3) ia64_ld4_c_clr_acq_hint_pred (code, 0, r1, r3, 0)
+#define ia64_ld8_c_clr_acq(code, r1, r3) ia64_ld8_c_clr_acq_hint_pred (code, 0, r1, r3, 0)
+
+#define ia64_ld16(code, r1, r3) ia64_ld16_hint_pred (code, 0, r1, r3, 0)
+#define ia64_ld16_acq(code, r1, r3) ia64_ld16_acq_hint_pred (code, 0, r1, r3, 0)
+
+#define ia64_ld1_inc(code, r1, r2, r3) ia64_ld1_inc_hint_pred (code, 0, r1, r2, r3, 0)
+#define ia64_ld2_inc(code, r1, r2, r3) ia64_ld2_inc_hint_pred (code, 0, r1, r2, r3, 0)
+#define ia64_ld4_inc(code, r1, r2, r3) ia64_ld4_inc_hint_pred (code, 0, r1, r2, r3, 0)
+#define ia64_ld8_inc(code, r1, r2, r3) ia64_ld8_inc_hint_pred (code, 0, r1, r2, r3, 0)
+
+#define ia64_ld1_s_inc(code, r1, r2, r3) ia64_ld1_s_inc_hint_pred (code, 0, r1, r2, r3, 0)
+#define ia64_ld2_s_inc(code, r1, r2, r3) ia64_ld2_s_inc_hint_pred (code, 0, r1, r2, r3, 0)
+#define ia64_ld4_s_inc(code, r1, r2, r3) ia64_ld4_s_inc_hint_pred (code, 0, r1, r2, r3, 0)
+#define ia64_ld8_s_inc(code, r1, r2, r3) ia64_ld8_s_inc_hint_pred (code, 0, r1, r2, r3, 0)
+
+#define ia64_ld1_a_inc(code, r1, r2, r3) ia64_ld1_a_inc_hint_pred (code, 0, r1, r2, r3, 0)
+#define ia64_ld2_a_inc(code, r1, r2, r3) ia64_ld2_a_inc_hint_pred (code, 0, r1, r2, r3, 0)
+#define ia64_ld4_a_inc(code, r1, r2, r3) ia64_ld4_a_inc_hint_pred (code, 0, r1, r2, r3, 0)
+#define ia64_ld8_a_inc(code, r1, r2, r3) ia64_ld8_a_inc_hint_pred (code, 0, r1, r2, r3, 0)
+
+#define ia64_ld1_sa_inc(code, r1, r2, r3) ia64_ld1_sa_inc_hint_pred (code, 0, r1, r2, r3, 0)
+#define ia64_ld2_sa_inc(code, r1, r2, r3) ia64_ld2_sa_inc_hint_pred (code, 0, r1, r2, r3, 0)
+#define ia64_ld4_sa_inc(code, r1, r2, r3) ia64_ld4_sa_inc_hint_pred (code, 0, r1, r2, r3, 0)
+#define ia64_ld8_sa_inc(code, r1, r2, r3) ia64_ld8_sa_inc_hint_pred (code, 0, r1, r2, r3, 0)
+
+#define ia64_ld1_bias_inc(code, r1, r2, r3) ia64_ld1_bias_inc_hint_pred (code, 0, r1, r2, r3, 0)
+#define ia64_ld2_bias_inc(code, r1, r2, r3) ia64_ld2_bias_inc_hint_pred (code, 0, r1, r2, r3, 0)
+#define ia64_ld4_bias_inc(code, r1, r2, r3) ia64_ld4_bias_inc_hint_pred (code, 0, r1, r2, r3, 0)
+#define ia64_ld8_bias_inc(code, r1, r2, r3) ia64_ld8_bias_inc_hint_pred (code, 0, r1, r2, r3, 0)
+
+#define ia64_ld1_acq_inc(code, r1, r2, r3) ia64_ld1_acq_inc_hint_pred (code, 0, r1, r2, r3, 0)
+#define ia64_ld2_acq_inc(code, r1, r2, r3) ia64_ld2_acq_inc_hint_pred (code, 0, r1, r2, r3, 0)
+#define ia64_ld4_acq_inc(code, r1, r2, r3) ia64_ld4_acq_inc_hint_pred (code, 0, r1, r2, r3, 0)
+#define ia64_ld8_acq_inc(code, r1, r2, r3) ia64_ld8_acq_inc_hint_pred (code, 0, r1, r2, r3, 0)
+
+#define ia64_ld8_fill_inc(code, r1, r2, r3) ia64_ld8_fill_inc_hint_pred (code, 0, r1, r2, r3, 0)
+
+#define ia64_ld1_c_clr_inc(code, r1, r2, r3) ia64_ld1_c_clr_inc_hint_pred (code, 0, r1, r2, r3, 0)
+#define ia64_ld2_c_clr_inc(code, r1, r2, r3) ia64_ld2_c_clr_inc_hint_pred (code, 0, r1, r2, r3, 0)
+#define ia64_ld4_c_clr_inc(code, r1, r2, r3) ia64_ld4_c_clr_inc_hint_pred (code, 0, r1, r2, r3, 0)
+#define ia64_ld8_c_clr_inc(code, r1, r2, r3) ia64_ld8_c_clr_inc_hint_pred (code, 0, r1, r2, r3, 0)
+
+#define ia64_ld1_c_nc_inc(code, r1, r2, r3) ia64_ld1_c_nc_inc_hint_pred (code, 0, r1, r2, r3, 0)
+#define ia64_ld2_c_nc_inc(code, r1, r2, r3) ia64_ld2_c_nc_inc_hint_pred (code, 0, r1, r2, r3, 0)
+#define ia64_ld4_c_nc_inc(code, r1, r2, r3) ia64_ld4_c_nc_inc_hint_pred (code, 0, r1, r2, r3, 0)
+#define ia64_ld8_c_nc_inc(code, r1, r2, r3) ia64_ld8_c_nc_inc_hint_pred (code, 0, r1, r2, r3, 0)
+
+#define ia64_ld1_c_clr_acq_inc(code, r1, r2, r3) ia64_ld1_c_clr_acq_inc_hint_pred (code, 0, r1, r2, r3, 0)
+#define ia64_ld2_c_clr_acq_inc(code, r1, r2, r3) ia64_ld2_c_clr_acq_inc_hint_pred (code, 0, r1, r2, r3, 0)
+#define ia64_ld4_c_clr_acq_inc(code, r1, r2, r3) ia64_ld4_c_clr_acq_inc_hint_pred (code, 0, r1, r2, r3, 0)
+#define ia64_ld8_c_clr_acq_inc(code, r1, r2, r3) ia64_ld8_c_clr_acq_inc_hint_pred (code, 0, r1, r2, r3, 0)
+
+#define ia64_ld1_inc_imm(code, r1, r3, imm) ia64_ld1_inc_imm_hint_pred (code, 0, r1, r3, imm, 0)
+#define ia64_ld2_inc_imm(code, r1, r3, imm) ia64_ld2_inc_imm_hint_pred (code, 0, r1, r3, imm, 0)
+#define ia64_ld4_inc_imm(code, r1, r3, imm) ia64_ld4_inc_imm_hint_pred (code, 0, r1, r3, imm, 0)
+#define ia64_ld8_inc_imm(code, r1, r3, imm) ia64_ld8_inc_imm_hint_pred (code, 0, r1, r3, imm, 0)
+
+#define ia64_ld1_s_inc_imm(code, r1, r3, imm) ia64_ld1_s_inc_imm_hint_pred (code, 0, r1, r3, imm, 0)
+#define ia64_ld2_s_inc_imm(code, r1, r3, imm) ia64_ld2_s_inc_imm_hint_pred (code, 0, r1, r3, imm, 0)
+#define ia64_ld4_s_inc_imm(code, r1, r3, imm) ia64_ld4_s_inc_imm_hint_pred (code, 0, r1, r3, imm, 0)
+#define ia64_ld8_s_inc_imm(code, r1, r3, imm) ia64_ld8_s_inc_imm_hint_pred (code, 0, r1, r3, imm, 0)
+
+#define ia64_ld1_a_inc_imm(code, r1, r3, imm) ia64_ld1_a_inc_imm_hint_pred (code, 0, r1, r3, imm, 0)
+#define ia64_ld2_a_inc_imm(code, r1, r3, imm) ia64_ld2_a_inc_imm_hint_pred (code, 0, r1, r3, imm, 0)
+#define ia64_ld4_a_inc_imm(code, r1, r3, imm) ia64_ld4_a_inc_imm_hint_pred (code, 0, r1, r3, imm, 0)
+#define ia64_ld8_a_inc_imm(code, r1, r3, imm) ia64_ld8_a_inc_imm_hint_pred (code, 0, r1, r3, imm, 0)
+
+#define ia64_ld1_sa_inc_imm(code, r1, r3, imm) ia64_ld1_sa_inc_imm_hint_pred (code, 0, r1, r3, imm, 0)
+#define ia64_ld2_sa_inc_imm(code, r1, r3, imm) ia64_ld2_sa_inc_imm_hint_pred (code, 0, r1, r3, imm, 0)
+#define ia64_ld4_sa_inc_imm(code, r1, r3, imm) ia64_ld4_sa_inc_imm_hint_pred (code, 0, r1, r3, imm, 0)
+#define ia64_ld8_sa_inc_imm(code, r1, r3, imm) ia64_ld8_sa_inc_imm_hint_pred (code, 0, r1, r3, imm, 0)
+
+#define ia64_ld1_bias_inc_imm(code, r1, r3, imm) ia64_ld1_bias_inc_imm_hint_pred (code, 0, r1, r3, imm, 0)
+#define ia64_ld2_bias_inc_imm(code, r1, r3, imm) ia64_ld2_bias_inc_imm_hint_pred (code, 0, r1, r3, imm, 0)
+#define ia64_ld4_bias_inc_imm(code, r1, r3, imm) ia64_ld4_bias_inc_imm_hint_pred (code, 0, r1, r3, imm, 0)
+#define ia64_ld8_bias_inc_imm(code, r1, r3, imm) ia64_ld8_bias_inc_imm_hint_pred (code, 0, r1, r3, imm, 0)
+
+#define ia64_ld1_acq_inc_imm(code, r1, r3, imm) ia64_ld1_acq_inc_imm_hint_pred (code, 0, r1, r3, imm, 0)
+#define ia64_ld2_acq_inc_imm(code, r1, r3, imm) ia64_ld2_acq_inc_imm_hint_pred (code, 0, r1, r3, imm, 0)
+#define ia64_ld4_acq_inc_imm(code, r1, r3, imm) ia64_ld4_acq_inc_imm_hint_pred (code, 0, r1, r3, imm, 0)
+#define ia64_ld8_acq_inc_imm(code, r1, r3, imm) ia64_ld8_acq_inc_imm_hint_pred (code, 0, r1, r3, imm, 0)
+
+#define ia64_ld8_fill_inc_imm(code, r1, r3, imm) ia64_ld8_fill_inc_imm_hint_pred (code, 0, r1, r3, imm, 0)
+
+#define ia64_ld1_c_clr_inc_imm(code, r1, r3, imm) ia64_ld1_c_clr_inc_imm_hint_pred (code, 0, r1, r3, imm, 0)
+#define ia64_ld2_c_clr_inc_imm(code, r1, r3, imm) ia64_ld2_c_clr_inc_imm_hint_pred (code, 0, r1, r3, imm, 0)
+#define ia64_ld4_c_clr_inc_imm(code, r1, r3, imm) ia64_ld4_c_clr_inc_imm_hint_pred (code, 0, r1, r3, imm, 0)
+#define ia64_ld8_c_clr_inc_imm(code, r1, r3, imm) ia64_ld8_c_clr_inc_imm_hint_pred (code, 0, r1, r3, imm, 0)
+
+#define ia64_ld1_c_nc_inc_imm(code, r1, r3, imm) ia64_ld1_c_nc_inc_imm_hint_pred (code, 0, r1, r3, imm, 0)
+#define ia64_ld2_c_nc_inc_imm(code, r1, r3, imm) ia64_ld2_c_nc_inc_imm_hint_pred (code, 0, r1, r3, imm, 0)
+#define ia64_ld4_c_nc_inc_imm(code, r1, r3, imm) ia64_ld4_c_nc_inc_imm_hint_pred (code, 0, r1, r3, imm, 0)
+#define ia64_ld8_c_nc_inc_imm(code, r1, r3, imm) ia64_ld8_c_nc_inc_imm_hint_pred (code, 0, r1, r3, imm, 0)
+
+#define ia64_ld1_c_clr_acq_inc_imm(code, r1, r3, imm) ia64_ld1_c_clr_acq_inc_imm_hint_pred (code, 0, r1, r3, imm, 0)
+#define ia64_ld2_c_clr_acq_inc_imm(code, r1, r3, imm) ia64_ld2_c_clr_acq_inc_imm_hint_pred (code, 0, r1, r3, imm, 0)
+#define ia64_ld4_c_clr_acq_inc_imm(code, r1, r3, imm) ia64_ld4_c_clr_acq_inc_imm_hint_pred (code, 0, r1, r3, imm, 0)
+#define ia64_ld8_c_clr_acq_inc_imm(code, r1, r3, imm) ia64_ld8_c_clr_acq_inc_imm_hint_pred (code, 0, r1, r3, imm, 0)
+
+/* End of pseudo ops */
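+/* Illustrative sketch (editorial, not part of the original header): these
+ * pseudo ops just fix the predicate to p0 and the locality hint to 0.
+ * Assuming `code` is the emission cursor used throughout this header,
+ *
+ *   ia64_ld8_inc_imm (code, 31, 32, 8);
+ *
+ * expands to ia64_ld8_inc_imm_hint_pred (code, 0, 31, 32, 8, 0), i.e.
+ * "ld8 r31 = [r32], 8": load eight bytes into r31, then post-increment
+ * the address register r32 by 8. */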
+
+#define ia64_st1_hint(code, r3, r2, hint) ia64_st1_hint_pred ((code), 0, r3, r2, hint)
+#define ia64_st2_hint(code, r3, r2, hint) ia64_st2_hint_pred ((code), 0, r3, r2, hint)
+#define ia64_st4_hint(code, r3, r2, hint) ia64_st4_hint_pred ((code), 0, r3, r2, hint)
+#define ia64_st8_hint(code, r3, r2, hint) ia64_st8_hint_pred ((code), 0, r3, r2, hint)
+
+/* Pseudo ops */
+#define ia64_st8(code, r3, r2) ia64_st8_hint ((code), (r3), (r2), 0)
+
+#define ia64_st1_rel_hint(code, r3, r2, hint) ia64_st1_rel_hint_pred ((code), 0, r3, r2, hint)
+#define ia64_st2_rel_hint(code, r3, r2, hint) ia64_st2_rel_hint_pred ((code), 0, r3, r2, hint)
+#define ia64_st4_rel_hint(code, r3, r2, hint) ia64_st4_rel_hint_pred ((code), 0, r3, r2, hint)
+#define ia64_st8_rel_hint(code, r3, r2, hint) ia64_st8_rel_hint_pred ((code), 0, r3, r2, hint)
+
+#define ia64_st8_spill_hint(code, r3, r2, hint) ia64_st8_spill_hint_pred ((code), 0, r3, r2, hint)
+
+#define ia64_st16_hint(code, r3, r2, hint) ia64_st16_hint_pred ((code), 0, r3, r2, hint)
+#define ia64_st16_rel_hint(code, r3, r2, hint) ia64_st16_rel_hint_pred ((code), 0, r3, r2, hint)
+
+#define ia64_st1_inc_imm_hint(code, r3, r2, imm, hint) ia64_st1_inc_imm_hint_pred ((code), 0, r3, r2, imm, hint)
+#define ia64_st2_inc_imm_hint(code, r3, r2, imm, hint) ia64_st2_inc_imm_hint_pred ((code), 0, r3, r2, imm, hint)
+#define ia64_st4_inc_imm_hint(code, r3, r2, imm, hint) ia64_st4_inc_imm_hint_pred ((code), 0, r3, r2, imm, hint)
+#define ia64_st8_inc_imm_hint(code, r3, r2, imm, hint) ia64_st8_inc_imm_hint_pred ((code), 0, r3, r2, imm, hint)
+
+#define ia64_st1_rel_inc_imm_hint(code, r3, r2, imm, hint) ia64_st1_rel_inc_imm_hint_pred ((code), 0, r3, r2, imm, hint)
+#define ia64_st2_rel_inc_imm_hint(code, r3, r2, imm, hint) ia64_st2_rel_inc_imm_hint_pred ((code), 0, r3, r2, imm, hint)
+#define ia64_st4_rel_inc_imm_hint(code, r3, r2, imm, hint) ia64_st4_rel_inc_imm_hint_pred ((code), 0, r3, r2, imm, hint)
+#define ia64_st8_rel_inc_imm_hint(code, r3, r2, imm, hint) ia64_st8_rel_inc_imm_hint_pred ((code), 0, r3, r2, imm, hint)
+
+#define ia64_st8_spill_inc_imm_hint(code, r3, r2, imm, hint) ia64_st8_spill_inc_imm_hint_pred ((code), 0, r3, r2, imm, hint)
+
+
+#define ia64_ldfs_hint(code, f1, r3, hint) ia64_ldfs_hint_pred ((code), 0, f1, r3, hint)
+#define ia64_ldfd_hint(code, f1, r3, hint) ia64_ldfd_hint_pred ((code), 0, f1, r3, hint)
+#define ia64_ldf8_hint(code, f1, r3, hint) ia64_ldf8_hint_pred ((code), 0, f1, r3, hint)
+#define ia64_ldfe_hint(code, f1, r3, hint) ia64_ldfe_hint_pred ((code), 0, f1, r3, hint)
+
+#define ia64_ldfs_s_hint(code, f1, r3, hint) ia64_ldfs_s_hint_pred ((code), 0, f1, r3, hint)
+#define ia64_ldfd_s_hint(code, f1, r3, hint) ia64_ldfd_s_hint_pred ((code), 0, f1, r3, hint)
+#define ia64_ldf8_s_hint(code, f1, r3, hint) ia64_ldf8_s_hint_pred ((code), 0, f1, r3, hint)
+#define ia64_ldfe_s_hint(code, f1, r3, hint) ia64_ldfe_s_hint_pred ((code), 0, f1, r3, hint)
+
+#define ia64_ldfs_a_hint(code, f1, r3, hint) ia64_ldfs_a_hint_pred ((code), 0, f1, r3, hint)
+#define ia64_ldfd_a_hint(code, f1, r3, hint) ia64_ldfd_a_hint_pred ((code), 0, f1, r3, hint)
+#define ia64_ldf8_a_hint(code, f1, r3, hint) ia64_ldf8_a_hint_pred ((code), 0, f1, r3, hint)
+#define ia64_ldfe_a_hint(code, f1, r3, hint) ia64_ldfe_a_hint_pred ((code), 0, f1, r3, hint)
+
+#define ia64_ldfs_sa_hint(code, f1, r3, hint) ia64_ldfs_sa_hint_pred ((code), 0, f1, r3, hint)
+#define ia64_ldfd_sa_hint(code, f1, r3, hint) ia64_ldfd_sa_hint_pred ((code), 0, f1, r3, hint)
+#define ia64_ldf8_sa_hint(code, f1, r3, hint) ia64_ldf8_sa_hint_pred ((code), 0, f1, r3, hint)
+#define ia64_ldfe_sa_hint(code, f1, r3, hint) ia64_ldfe_sa_hint_pred ((code), 0, f1, r3, hint)
+
+#define ia64_ldfs_c_clr_hint(code, f1, r3, hint) ia64_ldfs_c_clr_hint_pred ((code), 0, f1, r3, hint)
+#define ia64_ldfd_c_clr_hint(code, f1, r3, hint) ia64_ldfd_c_clr_hint_pred ((code), 0, f1, r3, hint)
+#define ia64_ldf8_c_clr_hint(code, f1, r3, hint) ia64_ldf8_c_clr_hint_pred ((code), 0, f1, r3, hint)
+#define ia64_ldfe_c_clr_hint(code, f1, r3, hint) ia64_ldfe_c_clr_hint_pred ((code), 0, f1, r3, hint)
+
+#define ia64_ldfs_c_nc_hint(code, f1, r3, hint) ia64_ldfs_c_nc_hint_pred ((code), 0, f1, r3, hint)
+#define ia64_ldfd_c_nc_hint(code, f1, r3, hint) ia64_ldfd_c_nc_hint_pred ((code), 0, f1, r3, hint)
+#define ia64_ldf8_c_nc_hint(code, f1, r3, hint) ia64_ldf8_c_nc_hint_pred ((code), 0, f1, r3, hint)
+#define ia64_ldfe_c_nc_hint(code, f1, r3, hint) ia64_ldfe_c_nc_hint_pred ((code), 0, f1, r3, hint)
+
+#define ia64_ldf_fill_hint(code, f1, r3, hint) ia64_ldf_fill_hint_pred ((code), 0, f1, r3, hint)
+
+
+#define ia64_ldfs_inc_hint(code, f1, r3, r2, hint) ia64_ldfs_inc_hint_pred ((code), 0, f1, r3, r2, hint)
+#define ia64_ldfd_inc_hint(code, f1, r3, r2, hint) ia64_ldfd_inc_hint_pred ((code), 0, f1, r3, r2, hint)
+#define ia64_ldf8_inc_hint(code, f1, r3, r2, hint) ia64_ldf8_inc_hint_pred ((code), 0, f1, r3, r2, hint)
+#define ia64_ldfe_inc_hint(code, f1, r3, r2, hint) ia64_ldfe_inc_hint_pred ((code), 0, f1, r3, r2, hint)
+
+#define ia64_ldfs_s_inc_hint(code, f1, r3, r2, hint) ia64_ldfs_s_inc_hint_pred ((code), 0, f1, r3, r2, hint)
+#define ia64_ldfd_s_inc_hint(code, f1, r3, r2, hint) ia64_ldfd_s_inc_hint_pred ((code), 0, f1, r3, r2, hint)
+#define ia64_ldf8_s_inc_hint(code, f1, r3, r2, hint) ia64_ldf8_s_inc_hint_pred ((code), 0, f1, r3, r2, hint)
+#define ia64_ldfe_s_inc_hint(code, f1, r3, r2, hint) ia64_ldfe_s_inc_hint_pred ((code), 0, f1, r3, r2, hint)
+
+#define ia64_ldfs_a_inc_hint(code, f1, r3, r2, hint) ia64_ldfs_a_inc_hint_pred ((code), 0, f1, r3, r2, hint)
+#define ia64_ldfd_a_inc_hint(code, f1, r3, r2, hint) ia64_ldfd_a_inc_hint_pred ((code), 0, f1, r3, r2, hint)
+#define ia64_ldf8_a_inc_hint(code, f1, r3, r2, hint) ia64_ldf8_a_inc_hint_pred ((code), 0, f1, r3, r2, hint)
+#define ia64_ldfe_a_inc_hint(code, f1, r3, r2, hint) ia64_ldfe_a_inc_hint_pred ((code), 0, f1, r3, r2, hint)
+
+#define ia64_ldfs_sa_inc_hint(code, f1, r3, r2, hint) ia64_ldfs_sa_inc_hint_pred ((code), 0, f1, r3, r2, hint)
+#define ia64_ldfd_sa_inc_hint(code, f1, r3, r2, hint) ia64_ldfd_sa_inc_hint_pred ((code), 0, f1, r3, r2, hint)
+#define ia64_ldf8_sa_inc_hint(code, f1, r3, r2, hint) ia64_ldf8_sa_inc_hint_pred ((code), 0, f1, r3, r2, hint)
+#define ia64_ldfe_sa_inc_hint(code, f1, r3, r2, hint) ia64_ldfe_sa_inc_hint_pred ((code), 0, f1, r3, r2, hint)
+
+#define ia64_ldfs_c_clr_inc_hint(code, f1, r3, r2, hint) ia64_ldfs_c_clr_inc_hint_pred ((code), 0, f1, r3, r2, hint)
+#define ia64_ldfd_c_clr_inc_hint(code, f1, r3, r2, hint) ia64_ldfd_c_clr_inc_hint_pred ((code), 0, f1, r3, r2, hint)
+#define ia64_ldf8_c_clr_inc_hint(code, f1, r3, r2, hint) ia64_ldf8_c_clr_inc_hint_pred ((code), 0, f1, r3, r2, hint)
+#define ia64_ldfe_c_clr_inc_hint(code, f1, r3, r2, hint) ia64_ldfe_c_clr_inc_hint_pred ((code), 0, f1, r3, r2, hint)
+
+#define ia64_ldfs_c_nc_inc_hint(code, f1, r3, r2, hint) ia64_ldfs_c_nc_inc_hint_pred ((code), 0, f1, r3, r2, hint)
+#define ia64_ldfd_c_nc_inc_hint(code, f1, r3, r2, hint) ia64_ldfd_c_nc_inc_hint_pred ((code), 0, f1, r3, r2, hint)
+#define ia64_ldf8_c_nc_inc_hint(code, f1, r3, r2, hint) ia64_ldf8_c_nc_inc_hint_pred ((code), 0, f1, r3, r2, hint)
+#define ia64_ldfe_c_nc_inc_hint(code, f1, r3, r2, hint) ia64_ldfe_c_nc_inc_hint_pred ((code), 0, f1, r3, r2, hint)
+
+#define ia64_ldf_fill_inc_hint(code, f1, r3, r2, hint) ia64_ldf_fill_inc_hint_pred ((code), 0, f1, r3, r2, hint)
+
+
+#define ia64_ldfs_inc_imm_hint(code, f1, r3, imm, hint) ia64_ldfs_inc_imm_hint_pred ((code), 0, f1, r3, imm, hint)
+#define ia64_ldfd_inc_imm_hint(code, f1, r3, imm, hint) ia64_ldfd_inc_imm_hint_pred ((code), 0, f1, r3, imm, hint)
+#define ia64_ldf8_inc_imm_hint(code, f1, r3, imm, hint) ia64_ldf8_inc_imm_hint_pred ((code), 0, f1, r3, imm, hint)
+#define ia64_ldfe_inc_imm_hint(code, f1, r3, imm, hint) ia64_ldfe_inc_imm_hint_pred ((code), 0, f1, r3, imm, hint)
+
+#define ia64_ldfs_s_inc_imm_hint(code, f1, r3, imm, hint) ia64_ldfs_s_inc_imm_hint_pred ((code), 0, f1, r3, imm, hint)
+#define ia64_ldfd_s_inc_imm_hint(code, f1, r3, imm, hint) ia64_ldfd_s_inc_imm_hint_pred ((code), 0, f1, r3, imm, hint)
+#define ia64_ldf8_s_inc_imm_hint(code, f1, r3, imm, hint) ia64_ldf8_s_inc_imm_hint_pred ((code), 0, f1, r3, imm, hint)
+#define ia64_ldfe_s_inc_imm_hint(code, f1, r3, imm, hint) ia64_ldfe_s_inc_imm_hint_pred ((code), 0, f1, r3, imm, hint)
+
+#define ia64_ldfs_a_inc_imm_hint(code, f1, r3, imm, hint) ia64_ldfs_a_inc_imm_hint_pred ((code), 0, f1, r3, imm, hint)
+#define ia64_ldfd_a_inc_imm_hint(code, f1, r3, imm, hint) ia64_ldfd_a_inc_imm_hint_pred ((code), 0, f1, r3, imm, hint)
+#define ia64_ldf8_a_inc_imm_hint(code, f1, r3, imm, hint) ia64_ldf8_a_inc_imm_hint_pred ((code), 0, f1, r3, imm, hint)
+#define ia64_ldfe_a_inc_imm_hint(code, f1, r3, imm, hint) ia64_ldfe_a_inc_imm_hint_pred ((code), 0, f1, r3, imm, hint)
+
+#define ia64_ldfs_sa_inc_imm_hint(code, f1, r3, imm, hint) ia64_ldfs_sa_inc_imm_hint_pred ((code), 0, f1, r3, imm, hint)
+#define ia64_ldfd_sa_inc_imm_hint(code, f1, r3, imm, hint) ia64_ldfd_sa_inc_imm_hint_pred ((code), 0, f1, r3, imm, hint)
+#define ia64_ldf8_sa_inc_imm_hint(code, f1, r3, imm, hint) ia64_ldf8_sa_inc_imm_hint_pred ((code), 0, f1, r3, imm, hint)
+#define ia64_ldfe_sa_inc_imm_hint(code, f1, r3, imm, hint) ia64_ldfe_sa_inc_imm_hint_pred ((code), 0, f1, r3, imm, hint)
+
+#define ia64_ldfs_c_clr_inc_imm_hint(code, f1, r3, imm, hint) ia64_ldfs_c_clr_inc_imm_hint_pred ((code), 0, f1, r3, imm, hint)
+#define ia64_ldfd_c_clr_inc_imm_hint(code, f1, r3, imm, hint) ia64_ldfd_c_clr_inc_imm_hint_pred ((code), 0, f1, r3, imm, hint)
+#define ia64_ldf8_c_clr_inc_imm_hint(code, f1, r3, imm, hint) ia64_ldf8_c_clr_inc_imm_hint_pred ((code), 0, f1, r3, imm, hint)
+#define ia64_ldfe_c_clr_inc_imm_hint(code, f1, r3, imm, hint) ia64_ldfe_c_clr_inc_imm_hint_pred ((code), 0, f1, r3, imm, hint)
+
+#define ia64_ldfs_c_nc_inc_imm_hint(code, f1, r3, imm, hint) ia64_ldfs_c_nc_inc_imm_hint_pred ((code), 0, f1, r3, imm, hint)
+#define ia64_ldfd_c_nc_inc_imm_hint(code, f1, r3, imm, hint) ia64_ldfd_c_nc_inc_imm_hint_pred ((code), 0, f1, r3, imm, hint)
+#define ia64_ldf8_c_nc_inc_imm_hint(code, f1, r3, imm, hint) ia64_ldf8_c_nc_inc_imm_hint_pred ((code), 0, f1, r3, imm, hint)
+#define ia64_ldfe_c_nc_inc_imm_hint(code, f1, r3, imm, hint) ia64_ldfe_c_nc_inc_imm_hint_pred ((code), 0, f1, r3, imm, hint)
+
+#define ia64_ldf_fill_inc_imm_hint(code, f1, r3, imm, hint) ia64_ldf_fill_inc_imm_hint_pred ((code), 0, f1, r3, imm, hint)
+
+/* Pseudo ops */
+
+#define ia64_ldfs(code, f1, r3) ia64_ldfs_pred (code, 0, f1, r3)
+#define ia64_ldfd(code, f1, r3) ia64_ldfd_pred (code, 0, f1, r3)
+#define ia64_ldf8(code, f1, r3) ia64_ldf8_pred (code, 0, f1, r3)
+#define ia64_ldfe(code, f1, r3) ia64_ldfe_pred (code, 0, f1, r3)
+
+#define ia64_ldfs_s(code, f1, r3) ia64_ldfs_s_pred (code, 0, f1, r3)
+#define ia64_ldfd_s(code, f1, r3) ia64_ldfd_s_pred (code, 0, f1, r3)
+#define ia64_ldf8_s(code, f1, r3) ia64_ldf8_s_pred (code, 0, f1, r3)
+#define ia64_ldfe_s(code, f1, r3) ia64_ldfe_s_pred (code, 0, f1, r3)
+
+#define ia64_ldfs_a(code, f1, r3) ia64_ldfs_a_pred (code, 0, f1, r3)
+#define ia64_ldfd_a(code, f1, r3) ia64_ldfd_a_pred (code, 0, f1, r3)
+#define ia64_ldf8_a(code, f1, r3) ia64_ldf8_a_pred (code, 0, f1, r3)
+#define ia64_ldfe_a(code, f1, r3) ia64_ldfe_a_pred (code, 0, f1, r3)
+
+#define ia64_ldfs_sa(code, f1, r3) ia64_ldfs_sa_pred (code, 0, f1, r3)
+#define ia64_ldfd_sa(code, f1, r3) ia64_ldfd_sa_pred (code, 0, f1, r3)
+#define ia64_ldf8_sa(code, f1, r3) ia64_ldf8_sa_pred (code, 0, f1, r3)
+#define ia64_ldfe_sa(code, f1, r3) ia64_ldfe_sa_pred (code, 0, f1, r3)
+
+#define ia64_ldfs_c_clr(code, f1, r3) ia64_ldfs_c_clr_pred (code, 0, f1, r3)
+#define ia64_ldfd_c_clr(code, f1, r3) ia64_ldfd_c_clr_pred (code, 0, f1, r3)
+#define ia64_ldf8_c_clr(code, f1, r3) ia64_ldf8_c_clr_pred (code, 0, f1, r3)
+#define ia64_ldfe_c_clr(code, f1, r3) ia64_ldfe_c_clr_pred (code, 0, f1, r3)
+
+#define ia64_ldfs_c_nc(code, f1, r3) ia64_ldfs_c_nc_pred (code, 0, f1, r3)
+#define ia64_ldfd_c_nc(code, f1, r3) ia64_ldfd_c_nc_pred (code, 0, f1, r3)
+#define ia64_ldf8_c_nc(code, f1, r3) ia64_ldf8_c_nc_pred (code, 0, f1, r3)
+#define ia64_ldfe_c_nc(code, f1, r3) ia64_ldfe_c_nc_pred (code, 0, f1, r3)
+
+#define ia64_ldf_fill(code, f1, r3) ia64_ldf_fill_pred (code, 0, f1, r3)
+
+#define ia64_ldfs_inc(code, f1, r3, r2) ia64_ldfs_inc_pred (code, 0, f1, r3, r2)
+#define ia64_ldfd_inc(code, f1, r3, r2) ia64_ldfd_inc_pred (code, 0, f1, r3, r2)
+#define ia64_ldf8_inc(code, f1, r3, r2) ia64_ldf8_inc_pred (code, 0, f1, r3, r2)
+#define ia64_ldfe_inc(code, f1, r3, r2) ia64_ldfe_inc_pred (code, 0, f1, r3, r2)
+
+#define ia64_ldfs_s_inc(code, f1, r3, r2) ia64_ldfs_s_inc_pred (code, 0, f1, r3, r2)
+#define ia64_ldfd_s_inc(code, f1, r3, r2) ia64_ldfd_s_inc_pred (code, 0, f1, r3, r2)
+#define ia64_ldf8_s_inc(code, f1, r3, r2) ia64_ldf8_s_inc_pred (code, 0, f1, r3, r2)
+#define ia64_ldfe_s_inc(code, f1, r3, r2) ia64_ldfe_s_inc_pred (code, 0, f1, r3, r2)
+
+#define ia64_ldfs_a_inc(code, f1, r3, r2) ia64_ldfs_a_inc_pred (code, 0, f1, r3, r2)
+#define ia64_ldfd_a_inc(code, f1, r3, r2) ia64_ldfd_a_inc_pred (code, 0, f1, r3, r2)
+#define ia64_ldf8_a_inc(code, f1, r3, r2) ia64_ldf8_a_inc_pred (code, 0, f1, r3, r2)
+#define ia64_ldfe_a_inc(code, f1, r3, r2) ia64_ldfe_a_inc_pred (code, 0, f1, r3, r2)
+
+#define ia64_ldfs_sa_inc(code, f1, r3, r2) ia64_ldfs_sa_inc_pred (code, 0, f1, r3, r2)
+#define ia64_ldfd_sa_inc(code, f1, r3, r2) ia64_ldfd_sa_inc_pred (code, 0, f1, r3, r2)
+#define ia64_ldf8_sa_inc(code, f1, r3, r2) ia64_ldf8_sa_inc_pred (code, 0, f1, r3, r2)
+#define ia64_ldfe_sa_inc(code, f1, r3, r2) ia64_ldfe_sa_inc_pred (code, 0, f1, r3, r2)
+
+#define ia64_ldfs_c_clr_inc(code, f1, r3, r2) ia64_ldfs_c_clr_inc_pred (code, 0, f1, r3, r2)
+#define ia64_ldfd_c_clr_inc(code, f1, r3, r2) ia64_ldfd_c_clr_inc_pred (code, 0, f1, r3, r2)
+#define ia64_ldf8_c_clr_inc(code, f1, r3, r2) ia64_ldf8_c_clr_inc_pred (code, 0, f1, r3, r2)
+#define ia64_ldfe_c_clr_inc(code, f1, r3, r2) ia64_ldfe_c_clr_inc_pred (code, 0, f1, r3, r2)
+
+#define ia64_ldfs_c_nc_inc(code, f1, r3, r2) ia64_ldfs_c_nc_inc_pred (code, 0, f1, r3, r2)
+#define ia64_ldfd_c_nc_inc(code, f1, r3, r2) ia64_ldfd_c_nc_inc_pred (code, 0, f1, r3, r2)
+#define ia64_ldf8_c_nc_inc(code, f1, r3, r2) ia64_ldf8_c_nc_inc_pred (code, 0, f1, r3, r2)
+#define ia64_ldfe_c_nc_inc(code, f1, r3, r2) ia64_ldfe_c_nc_inc_pred (code, 0, f1, r3, r2)
+
+#define ia64_ldf_fill_inc(code, f1, r3, r2) ia64_ldf_fill_inc_pred (code, 0, f1, r3, r2)
+
+#define ia64_ldfs_inc_imm(code, f1, r3, imm) ia64_ldfs_inc_imm_pred (code, 0, f1, r3, imm)
+#define ia64_ldfd_inc_imm(code, f1, r3, imm) ia64_ldfd_inc_imm_pred (code, 0, f1, r3, imm)
+#define ia64_ldf8_inc_imm(code, f1, r3, imm) ia64_ldf8_inc_imm_pred (code, 0, f1, r3, imm)
+#define ia64_ldfe_inc_imm(code, f1, r3, imm) ia64_ldfe_inc_imm_pred (code, 0, f1, r3, imm)
+
+#define ia64_ldfs_s_inc_imm(code, f1, r3, imm) ia64_ldfs_s_inc_imm_pred (code, 0, f1, r3, imm)
+#define ia64_ldfd_s_inc_imm(code, f1, r3, imm) ia64_ldfd_s_inc_imm_pred (code, 0, f1, r3, imm)
+#define ia64_ldf8_s_inc_imm(code, f1, r3, imm) ia64_ldf8_s_inc_imm_pred (code, 0, f1, r3, imm)
+#define ia64_ldfe_s_inc_imm(code, f1, r3, imm) ia64_ldfe_s_inc_imm_pred (code, 0, f1, r3, imm)
+
+#define ia64_ldfs_a_inc_imm(code, f1, r3, imm) ia64_ldfs_a_inc_imm_pred (code, 0, f1, r3, imm)
+#define ia64_ldfd_a_inc_imm(code, f1, r3, imm) ia64_ldfd_a_inc_imm_pred (code, 0, f1, r3, imm)
+#define ia64_ldf8_a_inc_imm(code, f1, r3, imm) ia64_ldf8_a_inc_imm_pred (code, 0, f1, r3, imm)
+#define ia64_ldfe_a_inc_imm(code, f1, r3, imm) ia64_ldfe_a_inc_imm_pred (code, 0, f1, r3, imm)
+
+#define ia64_ldfs_sa_inc_imm(code, f1, r3, imm) ia64_ldfs_sa_inc_imm_pred (code, 0, f1, r3, imm)
+#define ia64_ldfd_sa_inc_imm(code, f1, r3, imm) ia64_ldfd_sa_inc_imm_pred (code, 0, f1, r3, imm)
+#define ia64_ldf8_sa_inc_imm(code, f1, r3, imm) ia64_ldf8_sa_inc_imm_pred (code, 0, f1, r3, imm)
+#define ia64_ldfe_sa_inc_imm(code, f1, r3, imm) ia64_ldfe_sa_inc_imm_pred (code, 0, f1, r3, imm)
+
+#define ia64_ldfs_c_clr_inc_imm(code, f1, r3, imm) ia64_ldfs_c_clr_inc_imm_pred (code, 0, f1, r3, imm)
+#define ia64_ldfd_c_clr_inc_imm(code, f1, r3, imm) ia64_ldfd_c_clr_inc_imm_pred (code, 0, f1, r3, imm)
+#define ia64_ldf8_c_clr_inc_imm(code, f1, r3, imm) ia64_ldf8_c_clr_inc_imm_pred (code, 0, f1, r3, imm)
+#define ia64_ldfe_c_clr_inc_imm(code, f1, r3, imm) ia64_ldfe_c_clr_inc_imm_pred (code, 0, f1, r3, imm)
+
+#define ia64_ldfs_c_nc_inc_imm(code, f1, r3, imm) ia64_ldfs_c_nc_inc_imm_pred (code, 0, f1, r3, imm)
+#define ia64_ldfd_c_nc_inc_imm(code, f1, r3, imm) ia64_ldfd_c_nc_inc_imm_pred (code, 0, f1, r3, imm)
+#define ia64_ldf8_c_nc_inc_imm(code, f1, r3, imm) ia64_ldf8_c_nc_inc_imm_pred (code, 0, f1, r3, imm)
+#define ia64_ldfe_c_nc_inc_imm(code, f1, r3, imm) ia64_ldfe_c_nc_inc_imm_pred (code, 0, f1, r3, imm)
+
+#define ia64_ldf_fill_inc_imm(code, f1, r3, imm) ia64_ldf_fill_inc_imm_pred (code, 0, f1, r3, imm)
+
+/* End of pseudo ops */
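+/* Illustrative sketch (editorial): the floating-point load pseudo ops
+ * follow the same pattern, e.g.
+ *
+ *   ia64_ldfd (code, 8, 32);             // ldfd f8 = [r32]
+ *   ia64_ldfd_inc_imm (code, 9, 32, 8);  // ldfd f9 = [r32], 8
+ *
+ * both with predicate p0; the second form post-increments r32 by 8. */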
+
+#define ia64_stfs_hint(code, r3, f2, hint) ia64_stfs_hint_pred ((code), 0, r3, f2, hint)
+#define ia64_stfd_hint(code, r3, f2, hint) ia64_stfd_hint_pred ((code), 0, r3, f2, hint)
+#define ia64_stf8_hint(code, r3, f2, hint) ia64_stf8_hint_pred ((code), 0, r3, f2, hint)
+#define ia64_stfe_hint(code, r3, f2, hint) ia64_stfe_hint_pred ((code), 0, r3, f2, hint)
+#define ia64_stf_spill_hint(code, r3, f2, hint) ia64_stf_spill_hint_pred ((code), 0, r3, f2, hint)
+
+
+#define ia64_stfs_inc_imm_hint(code, r3, f2, imm, hint) ia64_stfs_inc_imm_hint_pred ((code), 0, r3, f2, imm, hint)
+#define ia64_stfd_inc_imm_hint(code, r3, f2, imm, hint) ia64_stfd_inc_imm_hint_pred ((code), 0, r3, f2, imm, hint)
+#define ia64_stf8_inc_imm_hint(code, r3, f2, imm, hint) ia64_stf8_inc_imm_hint_pred ((code), 0, r3, f2, imm, hint)
+#define ia64_stfe_inc_imm_hint(code, r3, f2, imm, hint) ia64_stfe_inc_imm_hint_pred ((code), 0, r3, f2, imm, hint)
+#define ia64_stf_spill_inc_imm_hint(code, r3, f2, imm, hint) ia64_stf_spill_inc_imm_hint_pred ((code), 0, r3, f2, imm, hint)
+
+
+#define ia64_ldfps_hint(code, f1, f2, r3, hint) ia64_ldfps_hint_pred ((code), 0, f1, f2, r3, hint)
+#define ia64_ldfpd_hint(code, f1, f2, r3, hint) ia64_ldfpd_hint_pred ((code), 0, f1, f2, r3, hint)
+#define ia64_ldfp8_hint(code, f1, f2, r3, hint) ia64_ldfp8_hint_pred ((code), 0, f1, f2, r3, hint)
+
+#define ia64_ldfps_s_hint(code, f1, f2, r3, hint) ia64_ldfps_s_hint_pred ((code), 0, f1, f2, r3, hint)
+#define ia64_ldfpd_s_hint(code, f1, f2, r3, hint) ia64_ldfpd_s_hint_pred ((code), 0, f1, f2, r3, hint)
+#define ia64_ldfp8_s_hint(code, f1, f2, r3, hint) ia64_ldfp8_s_hint_pred ((code), 0, f1, f2, r3, hint)
+
+#define ia64_ldfps_a_hint(code, f1, f2, r3, hint) ia64_ldfps_a_hint_pred ((code), 0, f1, f2, r3, hint)
+#define ia64_ldfpd_a_hint(code, f1, f2, r3, hint) ia64_ldfpd_a_hint_pred ((code), 0, f1, f2, r3, hint)
+#define ia64_ldfp8_a_hint(code, f1, f2, r3, hint) ia64_ldfp8_a_hint_pred ((code), 0, f1, f2, r3, hint)
+
+#define ia64_ldfps_sa_hint(code, f1, f2, r3, hint) ia64_ldfps_sa_hint_pred ((code), 0, f1, f2, r3, hint)
+#define ia64_ldfpd_sa_hint(code, f1, f2, r3, hint) ia64_ldfpd_sa_hint_pred ((code), 0, f1, f2, r3, hint)
+#define ia64_ldfp8_sa_hint(code, f1, f2, r3, hint) ia64_ldfp8_sa_hint_pred ((code), 0, f1, f2, r3, hint)
+
+#define ia64_ldfps_c_clr_hint(code, f1, f2, r3, hint) ia64_ldfps_c_clr_hint_pred ((code), 0, f1, f2, r3, hint)
+#define ia64_ldfpd_c_clr_hint(code, f1, f2, r3, hint) ia64_ldfpd_c_clr_hint_pred ((code), 0, f1, f2, r3, hint)
+#define ia64_ldfp8_c_clr_hint(code, f1, f2, r3, hint) ia64_ldfp8_c_clr_hint_pred ((code), 0, f1, f2, r3, hint)
+
+#define ia64_ldfps_c_nc_hint(code, f1, f2, r3, hint) ia64_ldfps_c_nc_hint_pred ((code), 0, f1, f2, r3, hint)
+#define ia64_ldfpd_c_nc_hint(code, f1, f2, r3, hint) ia64_ldfpd_c_nc_hint_pred ((code), 0, f1, f2, r3, hint)
+#define ia64_ldfp8_c_nc_hint(code, f1, f2, r3, hint) ia64_ldfp8_c_nc_hint_pred ((code), 0, f1, f2, r3, hint)
+
+
+#define ia64_ldfps_inc_hint(code, f1, f2, r3, hint) ia64_ldfps_inc_hint_pred ((code), 0, f1, f2, r3, hint)
+#define ia64_ldfpd_inc_hint(code, f1, f2, r3, hint) ia64_ldfpd_inc_hint_pred ((code), 0, f1, f2, r3, hint)
+#define ia64_ldfp8_inc_hint(code, f1, f2, r3, hint) ia64_ldfp8_inc_hint_pred ((code), 0, f1, f2, r3, hint)
+
+#define ia64_ldfps_s_inc_hint(code, f1, f2, r3, hint) ia64_ldfps_s_inc_hint_pred ((code), 0, f1, f2, r3, hint)
+#define ia64_ldfpd_s_inc_hint(code, f1, f2, r3, hint) ia64_ldfpd_s_inc_hint_pred ((code), 0, f1, f2, r3, hint)
+#define ia64_ldfp8_s_inc_hint(code, f1, f2, r3, hint) ia64_ldfp8_s_inc_hint_pred ((code), 0, f1, f2, r3, hint)
+
+#define ia64_ldfps_a_inc_hint(code, f1, f2, r3, hint) ia64_ldfps_a_inc_hint_pred ((code), 0, f1, f2, r3, hint)
+#define ia64_ldfpd_a_inc_hint(code, f1, f2, r3, hint) ia64_ldfpd_a_inc_hint_pred ((code), 0, f1, f2, r3, hint)
+#define ia64_ldfp8_a_inc_hint(code, f1, f2, r3, hint) ia64_ldfp8_a_inc_hint_pred ((code), 0, f1, f2, r3, hint)
+
+#define ia64_ldfps_sa_inc_hint(code, f1, f2, r3, hint) ia64_ldfps_sa_inc_hint_pred ((code), 0, f1, f2, r3, hint)
+#define ia64_ldfpd_sa_inc_hint(code, f1, f2, r3, hint) ia64_ldfpd_sa_inc_hint_pred ((code), 0, f1, f2, r3, hint)
+#define ia64_ldfp8_sa_inc_hint(code, f1, f2, r3, hint) ia64_ldfp8_sa_inc_hint_pred ((code), 0, f1, f2, r3, hint)
+
+#define ia64_ldfps_c_clr_inc_hint(code, f1, f2, r3, hint) ia64_ldfps_c_clr_inc_hint_pred ((code), 0, f1, f2, r3, hint)
+#define ia64_ldfpd_c_clr_inc_hint(code, f1, f2, r3, hint) ia64_ldfpd_c_clr_inc_hint_pred ((code), 0, f1, f2, r3, hint)
+#define ia64_ldfp8_c_clr_inc_hint(code, f1, f2, r3, hint) ia64_ldfp8_c_clr_inc_hint_pred ((code), 0, f1, f2, r3, hint)
+
+#define ia64_ldfps_c_nc_inc_hint(code, f1, f2, r3, hint) ia64_ldfps_c_nc_inc_hint_pred ((code), 0, f1, f2, r3, hint)
+#define ia64_ldfpd_c_nc_inc_hint(code, f1, f2, r3, hint) ia64_ldfpd_c_nc_inc_hint_pred ((code), 0, f1, f2, r3, hint)
+#define ia64_ldfp8_c_nc_inc_hint(code, f1, f2, r3, hint) ia64_ldfp8_c_nc_inc_hint_pred ((code), 0, f1, f2, r3, hint)
+
+#define ia64_lfetch_hint(code, r3, hint) ia64_lfetch_hint_pred ((code), 0, r3, hint)
+#define ia64_lfetch_excl_hint(code, r3, hint) ia64_lfetch_excl_hint_pred ((code), 0, r3, hint)
+#define ia64_lfetch_fault_hint(code, r3, hint) ia64_lfetch_fault_hint_pred ((code), 0, r3, hint)
+#define ia64_lfetch_fault_excl_hint(code, r3, hint) ia64_lfetch_fault_excl_hint_pred ((code), 0, r3, hint)
+
+
+#define ia64_lfetch_inc_hint(code, r3, r2, hint) ia64_lfetch_inc_hint_pred ((code), 0, r3, r2, hint)
+#define ia64_lfetch_excl_inc_hint(code, r3, r2, hint) ia64_lfetch_excl_inc_hint_pred ((code), 0, r3, r2, hint)
+#define ia64_lfetch_fault_inc_hint(code, r3, r2, hint) ia64_lfetch_fault_inc_hint_pred ((code), 0, r3, r2, hint)
+#define ia64_lfetch_fault_excl_inc_hint(code, r3, r2, hint) ia64_lfetch_fault_excl_inc_hint_pred ((code), 0, r3, r2, hint)
+
+
+#define ia64_lfetch_inc_imm_hint(code, r3, imm, hint) ia64_lfetch_inc_imm_hint_pred ((code), 0, r3, imm, hint)
+#define ia64_lfetch_excl_inc_imm_hint(code, r3, imm, hint) ia64_lfetch_excl_inc_imm_hint_pred ((code), 0, r3, imm, hint)
+#define ia64_lfetch_fault_inc_imm_hint(code, r3, imm, hint) ia64_lfetch_fault_inc_imm_hint_pred ((code), 0, r3, imm, hint)
+#define ia64_lfetch_fault_excl_inc_imm_hint(code, r3, imm, hint) ia64_lfetch_fault_excl_inc_imm_hint_pred ((code), 0, r3, imm, hint)
+
+
+#define ia64_cmpxchg1_acq_hint(code, r1, r3, r2, hint) ia64_cmpxchg1_acq_hint_pred ((code), 0, r1, r3, r2, hint)
+#define ia64_cmpxchg2_acq_hint(code, r1, r3, r2, hint) ia64_cmpxchg2_acq_hint_pred ((code), 0, r1, r3, r2, hint)
+#define ia64_cmpxchg4_acq_hint(code, r1, r3, r2, hint) ia64_cmpxchg4_acq_hint_pred ((code), 0, r1, r3, r2, hint)
+#define ia64_cmpxchg8_acq_hint(code, r1, r3, r2, hint) ia64_cmpxchg8_acq_hint_pred ((code), 0, r1, r3, r2, hint)
+#define ia64_cmpxchg1_rel_hint(code, r1, r3, r2, hint) ia64_cmpxchg1_rel_hint_pred ((code), 0, r1, r3, r2, hint)
+#define ia64_cmpxchg2_rel_hint(code, r1, r3, r2, hint) ia64_cmpxchg2_rel_hint_pred ((code), 0, r1, r3, r2, hint)
+#define ia64_cmpxchg4_rel_hint(code, r1, r3, r2, hint) ia64_cmpxchg4_rel_hint_pred ((code), 0, r1, r3, r2, hint)
+#define ia64_cmpxchg8_rel_hint(code, r1, r3, r2, hint) ia64_cmpxchg8_rel_hint_pred ((code), 0, r1, r3, r2, hint)
+#define ia64_cmpxchg16_acq_hint(code, r1, r3, r2, hint) ia64_cmpxchg16_acq_hint_pred ((code), 0, r1, r3, r2, hint)
+#define ia64_cmpxchg16_rel_hint(code, r1, r3, r2, hint) ia64_cmpxchg16_rel_hint_pred ((code), 0, r1, r3, r2, hint)
+#define ia64_xchg1_hint(code, r1, r3, r2, hint) ia64_xchg1_hint_pred ((code), 0, r1, r3, r2, hint)
+#define ia64_xchg2_hint(code, r1, r3, r2, hint) ia64_xchg2_hint_pred ((code), 0, r1, r3, r2, hint)
+#define ia64_xchg4_hint(code, r1, r3, r2, hint) ia64_xchg4_hint_pred ((code), 0, r1, r3, r2, hint)
+#define ia64_xchg8_hint(code, r1, r3, r2, hint) ia64_xchg8_hint_pred ((code), 0, r1, r3, r2, hint)
+
+#define ia64_fetchadd4_acq_hint(code, r1, r3, inc, hint) ia64_fetchadd4_acq_hint_pred ((code), 0, r1, r3, inc, hint)
+#define ia64_fetchadd8_acq_hint(code, r1, r3, inc, hint) ia64_fetchadd8_acq_hint_pred ((code), 0, r1, r3, inc, hint)
+#define ia64_fetchadd4_rel_hint(code, r1, r3, inc, hint) ia64_fetchadd4_rel_hint_pred ((code), 0, r1, r3, inc, hint)
+#define ia64_fetchadd8_rel_hint(code, r1, r3, inc, hint) ia64_fetchadd8_rel_hint_pred ((code), 0, r1, r3, inc, hint)
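+/* Illustrative sketch (editorial; see the IA-64 ISA manual for exact
+ * semantics).  An atomic 8-byte increment of the counter at [r32]:
+ *
+ *   ia64_fetchadd8_acq_hint (code, 8, 32, 1, 0);  // fetchadd8.acq r8 = [r32], 1
+ *
+ * r8 receives the old value; the ISA restricts the increment to
+ * -16, -8, -4, -1, 1, 4, 8 or 16.  The cmpxchg forms above compare
+ * against the ar.ccv application register implicitly. */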
+
+
+#define ia64_setf_sig(code, f1, r2) ia64_setf_sig_pred ((code), 0, f1, r2)
+#define ia64_setf_exp(code, f1, r2) ia64_setf_exp_pred ((code), 0, f1, r2)
+#define ia64_setf_s(code, f1, r2) ia64_setf_s_pred ((code), 0, f1, r2)
+#define ia64_setf_d(code, f1, r2) ia64_setf_d_pred ((code), 0, f1, r2)
+
+
+#define ia64_getf_sig(code, r1, f2) ia64_getf_sig_pred ((code), 0, r1, f2)
+#define ia64_getf_exp(code, r1, f2) ia64_getf_exp_pred ((code), 0, r1, f2)
+#define ia64_getf_s(code, r1, f2) ia64_getf_s_pred ((code), 0, r1, f2)
+#define ia64_getf_d(code, r1, f2) ia64_getf_d_pred ((code), 0, r1, f2)
+
+
+#define ia64_chk_s_m(code, r2, disp) ia64_chk_s_m_pred ((code), 0, r2, disp)
+
+
+#define ia64_chk_s_float_m(code, f2, disp) ia64_chk_s_float_m_pred ((code), 0, f2, disp)
+
+
+#define ia64_chk_a_nc(code, r1, disp) ia64_chk_a_nc_pred ((code), 0, r1, disp)
+#define ia64_chk_a_clr(code, r1, disp) ia64_chk_a_clr_pred ((code), 0, r1, disp)
+
+
+#define ia64_chk_a_nc_float(code, f1, disp) ia64_chk_a_nc_float_pred ((code), 0, f1, disp)
+#define ia64_chk_a_clr_float(code, f1, disp) ia64_chk_a_clr_float_pred ((code), 0, f1, disp)
+
+
+#define ia64_invala(code) ia64_invala_pred ((code), 0)
+#define ia64_fwb(code) ia64_fwb_pred ((code), 0)
+#define ia64_mf(code) ia64_mf_pred ((code), 0)
+#define ia64_mf_a(code) ia64_mf_a_pred ((code), 0)
+#define ia64_srlz_d(code) ia64_srlz_d_pred ((code), 0)
+#define ia64_stlz_i(code) ia64_stlz_i_pred ((code), 0)
+#define ia64_sync_i(code) ia64_sync_i_pred ((code), 0)
+
+
+#define ia64_flushrs(code) ia64_flushrs_pred ((code), 0)
+#define ia64_loadrs(code) ia64_loadrs_pred ((code), 0)
+
+#define ia64_invala_e(code, r1) ia64_invala_e_pred ((code), 0, r1)
+
+
+#define ia64_invala_e_float(code, f1) ia64_invala_e_float_pred ((code), 0, f1)
+
+
+#define ia64_fc(code, r3) ia64_fc_pred ((code), 0, r3)
+#define ia64_fc_i(code, r3) ia64_fc_i_pred ((code), 0, r3)
+
+
+#define ia64_mov_to_ar_m(code, ar3, r2) ia64_mov_to_ar_m_pred ((code), 0, ar3, r2)
+
+
+#define ia64_mov_to_ar_imm_m(code, ar3, imm) ia64_mov_to_ar_imm_m_pred ((code), 0, ar3, imm)
+
+
+#define ia64_mov_from_ar_m(code, r1, ar3) ia64_mov_from_ar_m_pred ((code), 0, r1, ar3)
+
+#define ia64_mov_to_cr(code, cr3, r2) ia64_mov_to_cr_pred ((code), 0, cr3, r2)
+
+
+#define ia64_mov_from_cr(code, r1, cr3) ia64_mov_from_cr_pred ((code), 0, r1, cr3)
+
+
+#define ia64_alloc(code, r1, i, l, o, r) ia64_alloc_pred ((code), 0, r1, i, l, o, r)
+
+
+#define ia64_mov_to_psr_l(code, r2) ia64_mov_to_psr_l_pred ((code), 0, r2)
+#define ia64_mov_to_psr_um(code, r2) ia64_mov_to_psr_um_pred ((code), 0, r2)
+
+
+#define ia64_mov_from_psr(code, r1) ia64_mov_from_psr_pred ((code), 0, r1)
+#define ia64_mov_from_psr_um(code, r1) ia64_mov_from_psr_um_pred ((code), 0, r1)
+
+
+#define ia64_break_m(code, imm) ia64_break_m_pred ((code), 0, imm)
+
+/* The System/Memory Management instruction encodings (M38-M47) */
+
+
+#define ia64_nop_m(code, imm) ia64_nop_m_pred ((code), 0, imm)
+#define ia64_hint_m(code, imm) ia64_hint_m_pred ((code), 0, imm)
+
+#define ia64_br_cond_hint(code, disp, bwh, ph, dh) ia64_br_cond_hint_pred ((code), 0, disp, bwh, ph, dh)
+#define ia64_br_wexit_hint(code, disp, bwh, ph, dh) ia64_br_wexit_hint_pred ((code), 0, disp, bwh, ph, dh)
+#define ia64_br_wtop_hint(code, disp, bwh, ph, dh) ia64_br_wtop_hint_pred ((code), 0, disp, bwh, ph, dh)
+
+
+#define ia64_br_cloop_hint(code, disp, bwh, ph, dh) ia64_br_cloop_hint_pred ((code), 0, disp, bwh, ph, dh)
+#define ia64_br_cexit_hint(code, disp, bwh, ph, dh) ia64_br_cexit_hint_pred ((code), 0, disp, bwh, ph, dh)
+#define ia64_br_ctop_hint(code, disp, bwh, ph, dh) ia64_br_ctop_hint_pred ((code), 0, disp, bwh, ph, dh)
+
+#define ia64_br_call_hint(code, b1, disp, bwh, ph, dh) ia64_br_call_hint_pred ((code), 0, b1, disp, bwh, ph, dh)
+
+#define ia64_br_cond_reg_hint(code, b1, bwh, ph, dh) ia64_br_cond_reg_hint_pred ((code), 0, b1, bwh, ph, dh)
+#define ia64_br_ia_reg_hint(code, b1, bwh, ph, dh) ia64_br_ia_reg_hint_pred ((code), 0, b1, bwh, ph, dh)
+#define ia64_br_ret_reg_hint(code, b1, bwh, ph, dh) ia64_br_ret_reg_hint_pred ((code), 0, b1, bwh, ph, dh)
+
+#define ia64_br_call_reg_hint(code, b1, b2, bwh, ph, dh) ia64_br_call_reg_hint_pred ((code), 0, b1, b2, bwh, ph, dh)
+
+/* Pseudo ops */
+
+#define ia64_br_cond(code, disp) ia64_br_cond_pred (code, 0, disp)
+#define ia64_br_wexit(code, disp) ia64_br_wexit_pred (code, 0, disp)
+#define ia64_br_wtop(code, disp) ia64_br_wtop_pred (code, 0, disp)
+
+#define ia64_br_cloop(code, disp) ia64_br_cloop_pred (code, 0, disp)
+#define ia64_br_cexit(code, disp) ia64_br_cexit_pred (code, 0, disp)
+#define ia64_br_ctop(code, disp) ia64_br_ctop_pred (code, 0, disp)
+
+#define ia64_br_call(code, b1, disp) ia64_br_call_pred (code, 0, b1, disp)
+
+#define ia64_br_cond_reg(code, b1) ia64_br_cond_reg_pred (code, 0, b1)
+#define ia64_br_ia_reg(code, b1) ia64_br_ia_reg_pred (code, 0, b1)
+#define ia64_br_ret_reg(code, b1) ia64_br_ret_reg_pred (code, 0, b1)
+
+#define ia64_br_call_reg(code, b1, b2) ia64_br_call_reg_pred (code, 0, b1, b2)
+
+/* End of pseudo ops */
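+/* Illustrative sketch (editorial): the branch pseudo ops above drop the
+ * bwh/ph/dh completers (0 encodes .sptk, .few, no dealloc).  A typical
+ * call/return pair:
+ *
+ *   ia64_br_call (code, 0, disp);  // br.call b0 = <IP-relative disp>
+ *   ...
+ *   ia64_br_ret_reg (code, 0);     // br.ret b0
+ *
+ * where disp is a bundle-relative displacement patched in by the caller. */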
+
+#define ia64_cover(code) ia64_cover_pred ((code), 0)
+#define ia64_clrrrb(code) ia64_clrrrb_pred ((code), 0)
+#define ia64_clrrrb_pr(code) ia64_clrrrb_pr_pred ((code), 0)
+#define ia64_rfi(code) ia64_rfi_pred ((code), 0)
+#define ia64_bsw_0(code) ia64_bsw_0_pred ((code), 0)
+#define ia64_bsw_1(code) ia64_bsw_1_pred ((code), 0)
+#define ia64_epc(code) ia64_epc_pred ((code), 0)
+
+
+#define ia64_break_b(code, imm) ia64_break_b_pred ((code), 0, imm)
+#define ia64_nop_b(code, imm) ia64_nop_b_pred ((code), 0, imm)
+#define ia64_hint_b(code, imm) ia64_hint_b_pred ((code), 0, imm)
+
+
+#define ia64_break_x(code, imm) ia64_break_x_pred ((code), 0, imm)
+
+
+#define ia64_movl(code, r1, imm) ia64_movl_pred ((code), 0, (r1), (imm))
+
+
+#define ia64_brl_cond_hint(code, disp, bwh, ph, dh) ia64_brl_cond_hint_pred ((code), 0, disp, bwh, ph, dh)
+
+
+#define ia64_brl_call_hint(code, b1, disp, bwh, ph, dh) ia64_brl_call_hint_pred ((code), 0, b1, disp, bwh, ph, dh)
+
+
+#define ia64_nop_x(code, imm) ia64_nop_x_pred ((code), 0, imm)
+#define ia64_hint_x(code, imm) ia64_hint_x_pred ((code), 0, imm)
+
+/*
+ * Pseudo-ops
+ */
+
+#define ia64_mov_pred(code, qp, r1, r3) ia64_adds_imm_pred ((code), (qp), (r1), 0, (r3))
+#define ia64_mov(code, r1, r3) ia64_mov_pred ((code), 0, (r1), (r3))
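+/* Editorial note: "mov r1 = r3" is the standard assembler pseudo-op for
+ * "adds r1 = 0, r3", which is why ia64_mov_pred above reuses
+ * ia64_adds_imm_pred with an immediate of zero. */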
+
+/*
+ * FLOATING POINT
+ */
+
+#define ia64_fma_sf(code, f1, f3, f4, f2, sf) ia64_fma_sf_pred ((code), 0, f1, f3, f4, f2, sf)
+#define ia64_fma_s_sf(code, f1, f3, f4, f2, sf) ia64_fma_s_sf_pred ((code), 0, f1, f3, f4, f2, sf)
+#define ia64_fma_d_sf(code, f1, f3, f4, f2, sf) ia64_fma_d_sf_pred ((code), 0, f1, f3, f4, f2, sf)
+#define ia64_fpma_sf(code, f1, f3, f4, f2, sf) ia64_fpma_sf_pred ((code), 0, f1, f3, f4, f2, sf)
+#define ia64_fms_sf(code, f1, f3, f4, f2, sf) ia64_fms_sf_pred ((code), 0, f1, f3, f4, f2, sf)
+#define ia64_fms_s_sf(code, f1, f3, f4, f2, sf) ia64_fms_s_sf_pred ((code), 0, f1, f3, f4, f2, sf)
+#define ia64_fms_d_sf(code, f1, f3, f4, f2, sf) ia64_fms_d_sf_pred ((code), 0, f1, f3, f4, f2, sf)
+#define ia64_fpms_sf(code, f1, f3, f4, f2, sf) ia64_fpms_sf_pred ((code), 0, f1, f3, f4, f2, sf)
+#define ia64_fnma_sf(code, f1, f3, f4, f2, sf) ia64_fnma_sf_pred ((code), 0, f1, f3, f4, f2, sf)
+#define ia64_fnma_s_sf(code, f1, f3, f4, f2, sf) ia64_fnma_s_sf_pred ((code), 0, f1, f3, f4, f2, sf)
+#define ia64_fnma_d_sf(code, f1, f3, f4, f2, sf) ia64_fnma_d_sf_pred ((code), 0, f1, f3, f4, f2, sf)
+#define ia64_fpnma_sf(code, f1, f3, f4, f2, sf) ia64_fpnma_sf_pred ((code), 0, f1, f3, f4, f2, sf)
+
+/* Pseudo ops */
+#define ia64_fnorm_s_sf(code, f1, f3, sf) ia64_fnorm_s_sf_pred ((code), 0, (f1), (f3), (sf))
+#define ia64_fnorm_d_sf(code, f1, f3, sf) ia64_fnorm_d_sf_pred ((code), 0, (f1), (f3), (sf))
+
+#define ia64_xma_l(code, f1, f3, f4, f2) ia64_xma_l_pred ((code), 0, f1, f3, f4, f2)
+#define ia64_xma_h(code, f1, f3, f4, f2) ia64_xma_h_pred ((code), 0, f1, f3, f4, f2)
+#define ia64_xma_hu(code, f1, f3, f4, f2) ia64_xma_hu_pred ((code), 0, f1, f3, f4, f2)
+
+/* Pseudo ops */
+#define ia64_xmpy_l(code, f1, f3, f4) ia64_xmpy_l_pred ((code), 0, (f1), (f3), (f4))
+#define ia64_xmpy_lu(code, f1, f3, f4) ia64_xmpy_lu_pred ((code), 0, (f1), (f3), (f4))
+#define ia64_xmpy_h(code, f1, f3, f4) ia64_xmpy_h_pred ((code), 0, (f1), (f3), (f4))
+#define ia64_xmpy_hu(code, f1, f3, f4) ia64_xmpy_hu_pred ((code), 0, (f1), (f3), (f4))
+
+#define ia64_fselect(code, f1, f3, f4, f2) ia64_fselect_pred ((code), 0, f1, f3, f4, f2)
+
+#define ia64_fcmp_eq_sf(code, p1, p2, f2, f3, sf) ia64_fcmp_eq_sf_pred ((code), 0, p1, p2, f2, f3, sf)
+#define ia64_fcmp_lt_sf(code, p1, p2, f2, f3, sf) ia64_fcmp_lt_sf_pred ((code), 0, p1, p2, f2, f3, sf)
+#define ia64_fcmp_le_sf(code, p1, p2, f2, f3, sf) ia64_fcmp_le_sf_pred ((code), 0, p1, p2, f2, f3, sf)
+#define ia64_fcmp_unord_sf(code, p1, p2, f2, f3, sf) ia64_fcmp_unord_sf_pred ((code), 0, p1, p2, f2, f3, sf)
+#define ia64_fcmp_eq_unc_sf(code, p1, p2, f2, f3, sf) ia64_fcmp_eq_unc_sf_pred ((code), 0, p1, p2, f2, f3, sf)
+#define ia64_fcmp_lt_unc_sf(code, p1, p2, f2, f3, sf) ia64_fcmp_lt_unc_sf_pred ((code), 0, p1, p2, f2, f3, sf)
+#define ia64_fcmp_le_unc_sf(code, p1, p2, f2, f3, sf) ia64_fcmp_le_unc_sf_pred ((code), 0, p1, p2, f2, f3, sf)
+#define ia64_fcmp_unord_unc_sf(code, p1, p2, f2, f3, sf) ia64_fcmp_unord_unc_sf_pred ((code), 0, p1, p2, f2, f3, sf)
+
+/* Pseudo ops */
+#define ia64_fcmp_gt_sf(code, p1, p2, f2, f3, sf) ia64_fcmp_gt_sf_pred ((code), 0, p1, p2, f2, f3, sf)
+#define ia64_fcmp_ge_sf(code, p1, p2, f2, f3, sf) ia64_fcmp_ge_sf_pred ((code), 0, p1, p2, f2, f3, sf)
+#define ia64_fcmp_ne_sf(code, p1, p2, f2, f3, sf) ia64_fcmp_ne_sf_pred ((code), 0, p1, p2, f2, f3, sf)
+#define ia64_fcmp_nlt_sf(code, p1, p2, f2, f3, sf) ia64_fcmp_nlt_sf_pred ((code), 0, p1, p2, f2, f3, sf)
+#define ia64_fcmp_nle_sf(code, p1, p2, f2, f3, sf) ia64_fcmp_nle_sf_pred ((code), 0, p1, p2, f2, f3, sf)
+#define ia64_fcmp_ngt_sf(code, p1, p2, f2, f3, sf) ia64_fcmp_ngt_sf_pred ((code), 0, p1, p2, f2, f3, sf)
+#define ia64_fcmp_nge_sf(code, p1, p2, f2, f3, sf) ia64_fcmp_nge_sf_pred ((code), 0, p1, p2, f2, f3, sf)
+#define ia64_fcmp_ord_sf(code, p1, p2, f2, f3, sf) ia64_fcmp_ord_sf_pred ((code), 0, p1, p2, f2, f3, sf)
+
+#define ia64_fclass_m(code, p1, p2, f2, fclass) ia64_fclass_m_pred ((code), 0, p1, p2, f2, fclass)
+#define ia64_fclass_m_unc(code, p1, p2, f2, fclass) ia64_fclass_m_unc_pred ((code), 0, p1, p2, f2, fclass)
+
+#define ia64_frcpa_sf(code, f1, p2, f2, f3, sf) ia64_frcpa_sf_pred ((code), 0, f1, p2, f2, f3, sf)
+#define ia64_fprcpa_sf(code, f1, p2, f2, f3, sf) ia64_fprcpa_sf_pred ((code), 0, f1, p2, f2, f3, sf)
+
+#define ia64_frsqrta_sf(code, f1, p2, f3, sf) ia64_frsqrta_sf_pred ((code), 0, f1, p2, f3, sf)
+#define ia64_fprsqrta_sf(code, f1, p2, f3, sf) ia64_fprsqrta_sf_pred ((code), 0, f1, p2, f3, sf)
+
+#define ia64_fmin_sf(code, f1, f2, f3, sf) ia64_fmin_sf_pred ((code), 0, f1, f2, f3, sf)
+#define ia64_fman_sf(code, f1, f2, f3, sf) ia64_fman_sf_pred ((code), 0, f1, f2, f3, sf)
+#define ia64_famin_sf(code, f1, f2, f3, sf) ia64_famin_sf_pred ((code), 0, f1, f2, f3, sf)
+#define ia64_famax_sf(code, f1, f2, f3, sf) ia64_famax_sf_pred ((code), 0, f1, f2, f3, sf)
+#define ia64_fpmin_sf(code, f1, f2, f3, sf) ia64_fpmin_sf_pred ((code), 0, f1, f2, f3, sf)
+#define ia64_fpman_sf(code, f1, f2, f3, sf) ia64_fpman_sf_pred ((code), 0, f1, f2, f3, sf)
+#define ia64_fpamin_sf(code, f1, f2, f3, sf) ia64_fpamin_sf_pred ((code), 0, f1, f2, f3, sf)
+#define ia64_fpamax_sf(code, f1, f2, f3, sf) ia64_fpamax_sf_pred ((code), 0, f1, f2, f3, sf)
+#define ia64_fpcmp_eq_sf(code, f1, f2, f3, sf) ia64_fpcmp_eq_sf_pred ((code), 0, f1, f2, f3, sf)
+#define ia64_fpcmp_lt_sf(code, f1, f2, f3, sf) ia64_fpcmp_lt_sf_pred ((code), 0, f1, f2, f3, sf)
+#define ia64_fpcmp_le_sf(code, f1, f2, f3, sf) ia64_fpcmp_le_sf_pred ((code), 0, f1, f2, f3, sf)
+#define ia64_fpcmp_unord_sf(code, f1, f2, f3, sf) ia64_fpcmp_unord_sf_pred ((code), 0, f1, f2, f3, sf)
+#define ia64_fpcmp_neq_sf(code, f1, f2, f3, sf) ia64_fpcmp_neq_sf_pred ((code), 0, f1, f2, f3, sf)
+#define ia64_fpcmp_nlt_sf(code, f1, f2, f3, sf) ia64_fpcmp_nlt_sf_pred ((code), 0, f1, f2, f3, sf)
+#define ia64_fpcmp_nle_sf(code, f1, f2, f3, sf) ia64_fpcmp_nle_sf_pred ((code), 0, f1, f2, f3, sf)
+#define ia64_fpcmp_ord_sf(code, f1, f2, f3, sf) ia64_fpcmp_ord_sf_pred ((code), 0, f1, f2, f3, sf)
+
+#define ia64_fmerge_s(code, f1, f2, f3) ia64_fmerge_s_pred ((code), 0, f1, f2, f3)
+#define ia64_fmerge_ns(code, f1, f2, f3) ia64_fmerge_ns_pred ((code), 0, f1, f2, f3)
+#define ia64_fmerge_se(code, f1, f2, f3) ia64_fmerge_se_pred ((code), 0, f1, f2, f3)
+#define ia64_fmix_lr(code, f1, f2, f3) ia64_fmix_lr_pred ((code), 0, f1, f2, f3)
+#define ia64_fmix_r(code, f1, f2, f3) ia64_fmix_r_pred ((code), 0, f1, f2, f3)
+#define ia64_fmix_l(code, f1, f2, f3) ia64_fmix_l_pred ((code), 0, f1, f2, f3)
+#define ia64_fsxt_r(code, f1, f2, f3) ia64_fsxt_r_pred ((code), 0, f1, f2, f3)
+#define ia64_fsxt_l(code, f1, f2, f3) ia64_fsxt_l_pred ((code), 0, f1, f2, f3)
+#define ia64_fpack(code, f1, f2, f3) ia64_fpack_pred ((code), 0, f1, f2, f3)
+#define ia64_fswap(code, f1, f2, f3) ia64_fswap_pred ((code), 0, f1, f2, f3)
+#define ia64_fswap_nl(code, f1, f2, f3) ia64_fswap_nl_pred ((code), 0, f1, f2, f3)
+#define ia64_fswap_nr(code, f1, f2, f3) ia64_fswap_nr_pred ((code), 0, f1, f2, f3)
+#define ia64_fand(code, f1, f2, f3) ia64_fand_pred ((code), 0, f1, f2, f3)
+#define ia64_fandcm(code, f1, f2, f3) ia64_fandcm_pred ((code), 0, f1, f2, f3)
+#define ia64_for(code, f1, f2, f3) ia64_for_pred ((code), 0, f1, f2, f3)
+#define ia64_fxor(code, f1, f2, f3) ia64_fxor_pred ((code), 0, f1, f2, f3)
+#define ia64_fpmerge_s(code, f1, f2, f3) ia64_fpmerge_s_pred ((code), 0, f1, f2, f3)
+#define ia64_fpmerge_ns(code, f1, f2, f3) ia64_fpmerge_ns_pred ((code), 0, f1, f2, f3)
+#define ia64_fpmerge_se(code, f1, f2, f3) ia64_fpmerge_se_pred ((code), 0, f1, f2, f3)
+
+/* Pseudo ops */
+#define ia64_fmov(code, f1, f3) ia64_fmov_pred ((code), 0, (f1), (f3))
+
+#define ia64_fcvt_fx_sf(code, f1, f2, sf) ia64_fcvt_fx_sf_pred ((code), 0, f1, f2, sf)
+#define ia64_fcvt_fxu_sf(code, f1, f2, sf) ia64_fcvt_fxu_sf_pred ((code), 0, f1, f2, sf)
+#define ia64_fcvt_fx_trunc_sf(code, f1, f2, sf) ia64_fcvt_fx_trunc_sf_pred ((code), 0, f1, f2, sf)
+#define ia64_fcvt_fxu_trunc_sf(code, f1, f2, sf) ia64_fcvt_fxu_trunc_sf_pred ((code), 0, f1, f2, sf)
+#define ia64_fpcvt_fx_sf(code, f1, f2, sf) ia64_fpcvt_fx_sf_pred ((code), 0, f1, f2, sf)
+#define ia64_fpcvt_fxu_sf(code, f1, f2, sf) ia64_fpcvt_fxu_sf_pred ((code), 0, f1, f2, sf)
+#define ia64_fpcvt_fx_trunc_sf(code, f1, f2, sf) ia64_fpcvt_fx_trunc_sf_pred ((code), 0, f1, f2, sf)
+#define ia64_fpcvt_fxu_trunc_sf(code, f1, f2, sf) ia64_fpcvt_fxu_trunc_sf_pred ((code), 0, f1, f2, sf)
+
+#define ia64_fcvt_xf(code, f1, f2) ia64_fcvt_xf_pred ((code), 0, f1, f2)
+
+#define ia64_fsetc_sf(code, amask, omask, sf) ia64_fsetc_sf_pred ((code), 0, amask, omask, sf)
+
+#define ia64_fclrf_sf(code, sf) ia64_fclrf_sf_pred ((code), 0, sf)
+
+#define ia64_fchkf_sf(code, disp, sf) ia64_fchkf_sf_pred ((code), 0, disp, sf)
+
+#define ia64_break_f(code, imm) ia64_break_f_pred ((code), 0, imm)
+
+
+#endif
diff --git a/lib/ffts/src/arch/mips/.gitignore b/lib/ffts/src/arch/mips/.gitignore
new file mode 100644
index 0000000..13efac7
--- /dev/null
+++ b/lib/ffts/src/arch/mips/.gitignore
@@ -0,0 +1,6 @@
+/
+/Makefile
+/Makefile.in
+/*.o
+/*.lo
+/.deps
diff --git a/lib/ffts/src/arch/mips/Makefile.am b/lib/ffts/src/arch/mips/Makefile.am
new file mode 100644
index 0000000..1063365
--- /dev/null
+++ b/lib/ffts/src/arch/mips/Makefile.am
@@ -0,0 +1,8 @@
+
+AM_CPPFLAGS = $(GLIB_CFLAGS) -I$(top_srcdir)
+
+noinst_LTLIBRARIES = libmonoarch-mips.la
+
+libmonoarch_mips_la_SOURCES = mips-codegen.h
+
+noinst_PROGRAMS = test
diff --git a/lib/ffts/src/arch/mips/mips-codegen.h b/lib/ffts/src/arch/mips/mips-codegen.h
new file mode 100644
index 0000000..1dbd1c6
--- /dev/null
+++ b/lib/ffts/src/arch/mips/mips-codegen.h
@@ -0,0 +1,435 @@
+#ifndef __MIPS_CODEGEN_H__
+#define __MIPS_CODEGEN_H__
+/*
+ * Copyright (c) 2004 Novell, Inc
+ * Author: Paolo Molaro (lupus@ximian.com)
+ *
+ */
+
+/* registers */
+enum {
+ mips_zero,
+ mips_at, /* assembler temp */
+ mips_v0, /* return values */
+ mips_v1,
+ mips_a0, /* 4 - func arguments */
+ mips_a1,
+ mips_a2,
+ mips_a3,
+#if _MIPS_SIM == _ABIO32
+ mips_t0, /* 8 temporaries */
+ mips_t1,
+ mips_t2,
+ mips_t3,
+ mips_t4,
+ mips_t5,
+ mips_t6,
+ mips_t7,
+#elif _MIPS_SIM == _ABIN32
+ mips_a4, /* 4 more argument registers */
+ mips_a5,
+ mips_a6,
+ mips_a7,
+ mips_t0, /* 4 temporaries */
+ mips_t1,
+ mips_t2,
+ mips_t3,
+#endif
+ mips_s0, /* 16 callee saved */
+ mips_s1,
+ mips_s2,
+ mips_s3,
+ mips_s4,
+ mips_s5,
+ mips_s6,
+ mips_s7,
+ mips_t8, /* 24 temps */
+ mips_t9, /* 25 temp / pic call-through register */
+ mips_k0, /* 26 kernel-reserved */
+ mips_k1,
+ mips_gp, /* 28 */
+ mips_sp, /* stack pointer */
+ mips_fp, /* frame pointer */
+ mips_ra /* return address */
+};
+
+/* we treat the register file as containing just doubles... */
+enum {
+ mips_f0, /* return regs */
+ mips_f1,
+ mips_f2,
+ mips_f3,
+ mips_f4, /* temps */
+ mips_f5,
+ mips_f6,
+ mips_f7,
+ mips_f8,
+ mips_f9,
+ mips_f10,
+ mips_f11,
+ mips_f12, /* first arg */
+ mips_f13,
+ mips_f14, /* second arg */
+ mips_f15,
+ mips_f16, /* temps */
+ mips_f17,
+ mips_f18,
+ mips_f19,
+ mips_f20, /* callee saved */
+ mips_f21,
+ mips_f22,
+ mips_f23,
+ mips_f24,
+ mips_f25,
+ mips_f26,
+ mips_f27,
+ mips_f28,
+ mips_f29,
+ mips_f30,
+ mips_f31
+};
+
+/* prefetch hints */
+enum {
+ MIPS_FOR_LOAD,
+ MIPS_FOR_STORE,
+ MIPS_FOR_LOAD_STREAMED = 4,
+ MIPS_FOR_STORE_STREAMED,
+ MIPS_FOR_LOAD_RETAINED,
+ MIPS_FOR_STORE_RETAINED
+};
+
+/* coprocessors */
+enum {
+ MIPS_COP0,
+ MIPS_COP1,
+ MIPS_COP2,
+ MIPS_COP3
+};
+
+enum {
+ MIPS_FMT_SINGLE = 16,
+ MIPS_FMT_DOUBLE = 17,
+ MIPS_FMT_WORD = 20,
+ MIPS_FMT_LONG = 21,
+ MIPS_FMT3_SINGLE = 0,
+ MIPS_FMT3_DOUBLE = 1
+};
+
+/* fpu rounding mode */
+enum {
+ MIPS_ROUND_TO_NEAREST,
+ MIPS_ROUND_TO_ZERO,
+ MIPS_ROUND_TO_POSINF,
+ MIPS_ROUND_TO_NEGINF,
+ MIPS_ROUND_MASK = 3
+};
+
+/* fpu enable/cause flags, cc */
+enum {
+ MIPS_FPU_C_MASK = 1 << 23,
+ MIPS_INEXACT = 1,
+ MIPS_UNDERFLOW = 2,
+ MIPS_OVERFLOW = 4,
+ MIPS_DIVZERO = 8,
+ MIPS_INVALID = 16,
+ MIPS_NOTIMPL = 32,
+ MIPS_FPU_FLAGS_OFFSET = 2,
+ MIPS_FPU_ENABLES_OFFSET = 7,
+ MIPS_FPU_CAUSES_OFFSET = 12
+};
+
+/* fpu condition values - see manual entry for C.cond.fmt instructions */
+enum {
+ MIPS_FPU_F,
+ MIPS_FPU_UN,
+ MIPS_FPU_EQ,
+ MIPS_FPU_UEQ,
+ MIPS_FPU_OLT,
+ MIPS_FPU_ULT,
+ MIPS_FPU_OLE,
+ MIPS_FPU_ULE,
+ MIPS_FPU_SF,
+ MIPS_FPU_NGLE,
+ MIPS_FPU_SEQ,
+ MIPS_FPU_NGL,
+ MIPS_FPU_LT,
+ MIPS_FPU_NGE,
+ MIPS_FPU_LE,
+ MIPS_FPU_NGT
+};
+
+#if SIZEOF_REGISTER == 4
+
+#define MIPS_SW mips_sw
+#define MIPS_LW mips_lw
+#define MIPS_ADDU mips_addu
+#define MIPS_ADDIU mips_addiu
+#define MIPS_SWC1 mips_swc1
+#define MIPS_LWC1 mips_lwc1
+#define MIPS_MOVE mips_move
+
+#elif SIZEOF_REGISTER == 8
+
+#define MIPS_SW mips_sd
+#define MIPS_LW mips_ld
+#define MIPS_ADDU mips_daddu
+#define MIPS_ADDIU mips_daddiu
+#define MIPS_SWC1 mips_sdc1
+#define MIPS_LWC1 mips_ldc1
+#define MIPS_MOVE mips_dmove
+
+#else
+#error Unknown SIZEOF_REGISTER
+#endif
+
+#define mips_emit32(c,x) do { \
+ *((guint32 *) (void *)(c)) = x; \
+ (c) = (typeof(c))(((guint32 *)(void *)(c)) + 1); \
+ } while (0)
+
+#define mips_format_i(code,op,rs,rt,imm) mips_emit32 ((code), (((op)<<26)|((rs)<<21)|((rt)<<16)|((imm)&0xffff)))
+#define mips_format_j(code,op,imm) mips_emit32 ((code), (((op)<<26)|((imm)&0x03ffffff)))
+#define mips_format_r(code,op,rs,rt,rd,sa,func) mips_emit32 ((code), (((op)<<26)|((rs)<<21)|((rt)<<16)|((rd)<<11)|((sa)<<6)|(func)))
+#define mips_format_divmul(code,op,src1,src2,fun) mips_emit32 ((code), (((op)<<26)|((src1)<<21)|((src2)<<16)|(fun)))
+
+#define mips_is_imm16(val) ((gint)(gshort)(gint)(val) == (gint)(val))
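+/* Worked example (editorial): an R-format instruction packs
+ * op|rs|rt|rd|sa|func into 32 bits.  Under the O32 numbering above,
+ * "addu $t0, $a0, $a1" is op=0, rs=4, rt=5, rd=8, sa=0, func=33, so
+ *
+ *   mips_addu (c, mips_t0, mips_a0, mips_a1);
+ *
+ * emits (0<<26)|(4<<21)|(5<<16)|(8<<11)|(0<<6)|33 = 0x00854021 at the
+ * cursor `c`, which mips_emit32 then advances by four bytes. */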
+
+/* Load always using lui/addiu pair (for later patching) */
+#define mips_load(c,D,v) do { \
+ if (((guint32)(v)) & (1 << 15)) { \
+ mips_lui ((c), (D), mips_zero, (((guint32)(v))>>16)+1); \
+ } \
+ else { \
+ mips_lui ((c), (D), mips_zero, (((guint32)(v))>>16)); \
+ } \
+ mips_addiu ((c), (D), (D), ((guint32)(v)) & 0xffff); \
+ } while (0)
+
+/* load constant - no patch-up */
+#define mips_load_const(c,D,v) do { \
+ if (!mips_is_imm16 ((v))) { \
+ if (((guint32)(v)) & (1 << 15)) { \
+ mips_lui ((c), (D), mips_zero, (((guint32)(v))>>16)+1); \
+ } \
+ else { \
+ mips_lui ((c), (D), mips_zero, (((guint32)(v))>>16)); \
+ } \
+ if (((guint32)(v)) & 0xffff) \
+ mips_addiu ((c), (D), (D), ((guint32)(v)) & 0xffff); \
+ } \
+ else \
+ mips_addiu ((c), (D), mips_zero, ((guint32)(v)) & 0xffff); \
+ } while (0)
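+/* Editorial note on the "+1" bias above: addiu sign-extends its 16-bit
+ * immediate, so when bit 15 of the constant is set the low half is added
+ * as a negative value and the high half must be biased by one.  E.g. for
+ * v = 0x12348765, lui loads 0x12350000 and addiu adds 0xffff8765; the
+ * 32-bit sum wraps around to exactly 0x12348765. */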
+
+/* arithmetic ops */
+#define mips_add(c,dest,src1,src2) mips_format_r(c,0,src1,src2,dest,0,32)
+#define mips_addi(c,dest,src1,imm) mips_format_i(c,8,src1,dest,imm)
+#define mips_addu(c,dest,src1,src2) mips_format_r(c,0,src1,src2,dest,0,33)
+#define mips_addiu(c,dest,src1,imm) mips_format_i(c,9,src1,dest,imm)
+#define mips_dadd(c,dest,src1,src2) mips_format_r(c,0,src1,src2,dest,0,44)
+#define mips_daddi(c,dest,src1,imm) mips_format_i(c,24,src1,dest,imm)
+#define mips_daddu(c,dest,src1,src2) mips_format_r(c,0,src1,src2,dest,0,45)
+#define mips_daddiu(c,dest,src1,imm) mips_format_i(c,25,src1,dest,imm)
+#define mips_dsub(c,dest,src1,src2) mips_format_r(c,0,src1,src2,dest,0,46)
+#define mips_dsubu(c,dest,src1,src2) mips_format_r(c,0,src1,src2,dest,0,47)
+#define mips_mul(c,dest,src1,src2) mips_format_r(c,28,src1,src2,dest,0,2)
+#define mips_sub(c,dest,src1,src2) mips_format_r(c,0,src1,src2,dest,0,34)
+#define mips_subu(c,dest,src1,src2) mips_format_r(c,0,src1,src2,dest,0,35)
+
+/* div and mul ops */
+#define mips_ddiv(c,src1,src2) mips_format_divmul(c,0,src1,src2,30)
+#define mips_ddivu(c,src1,src2) mips_format_divmul(c,0,src1,src2,31)
+#define mips_div(c,src1,src2) mips_format_divmul(c,0,src1,src2,26)
+#define mips_divu(c,src1,src2) mips_format_divmul(c,0,src1,src2,27)
+#define mips_dmult(c,src1,src2) mips_format_divmul(c,0,src1,src2,28)
+#define mips_dmultu(c,src1,src2) mips_format_divmul(c,0,src1,src2,29)
+#define mips_mult(c,src1,src2) mips_format_divmul(c,0,src1,src2,24)
+#define mips_multu(c,src1,src2) mips_format_divmul(c,0,src1,src2,25)
+
+/* shift ops */
+#define mips_dsll(c,dest,src1,imm) mips_format_r(c,0,0,src1,dest,imm,56)
+#define mips_dsll32(c,dest,src1,imm) mips_format_r(c,0,0,src1,dest,imm,60)
+#define mips_dsllv(c,dest,src1,src2) mips_format_r(c,0,src2,src1,dest,0,20)
+#define mips_dsra(c,dest,src1,imm) mips_format_r(c,0,0,src1,dest,imm,59)
+#define mips_dsra32(c,dest,src1,imm) mips_format_r(c,0,0,src1,dest,imm,63)
+#define mips_dsrav(c,dest,src1,src2) mips_format_r(c,0,src2,src1,dest,0,23)
+#define mips_dsrl(c,dest,src1,imm) mips_format_r(c,0,0,src1,dest,imm,58)
+#define mips_dsrl32(c,dest,src1,imm) mips_format_r(c,0,0,src1,dest,imm,62)
+#define mips_dsrlv(c,dest,src1,src2) mips_format_r(c,0,src2,src1,dest,0,22)
+#define mips_sll(c,dest,src1,imm) mips_format_r(c,0,0,src1,dest,imm,0)
+#define mips_sllv(c,dest,src1,src2) mips_format_r(c,0,src2,src1,dest,0,4)
+#define mips_sra(c,dest,src1,imm) mips_format_r(c,0,0,src1,dest,imm,3)
+#define mips_srav(c,dest,src1,src2) mips_format_r(c,0,src2,src1,dest,0,7)
+#define mips_srl(c,dest,src1,imm) mips_format_r(c,0,0,src1,dest,imm,2)
+#define mips_srlv(c,dest,src1,src2) mips_format_r(c,0,src2,src1,dest,0,6)
+
+/* logical ops */
+#define mips_and(c,dest,src1,src2) mips_format_r(c,0,src1,src2,dest,0,36)
+#define mips_andi(c,dest,src1,imm) mips_format_i(c,12,src1,dest,imm)
+#define mips_nor(c,dest,src1,src2) mips_format_r(c,0,src1,src2,dest,0,39)
+#define mips_or(c,dest,src1,src2) mips_format_r(c,0,src1,src2,dest,0,37)
+#define mips_ori(c,dest,src1,uimm) mips_format_i(c,13,src1,dest,uimm)
+#define mips_xor(c,dest,src1,src2) mips_format_r(c,0,src1,src2,dest,0,38)
+#define mips_xori(c,dest,src1,uimm) mips_format_i(c,14,src1,dest,uimm)
+
+/* compares */
+#define mips_slt(c,dest,src1,src2) mips_format_r(c,0,src1,src2,dest,0,42)
+#define mips_slti(c,dest,src1,imm) mips_format_i(c,10,src1,dest,imm)
+#define mips_sltiu(c,dest,src1,imm) mips_format_i(c,11,src1,dest,imm)
+#define mips_sltu(c,dest,src1,src2) mips_format_r(c,0,src1,src2,dest,0,43)
+/* missing traps: teq, teqi, tge, tgei, tgeiu, tgeu, tlt, tlti, tltiu, tltu, tne, tnei */
+
+/* conditional branches */
+#define mips_beq(c,src1,src2,offset) mips_format_i(c,4,src1,src2,offset)
+#define mips_beql(c,src1,src2,offset) mips_format_i(c,20,src1,src2,offset)
+#define mips_bgez(c,src1,offset) mips_format_i(c,1,src1,1,offset)
+#define mips_bgezal(c,src1,offset) mips_format_i(c,1,src1,17,offset)
+#define mips_bgezall(c,src1,offset) mips_format_i(c,1,src1,19,offset)
+#define mips_bgezl(c,src1,offset) mips_format_i(c,1,src1,3,offset)
+#define mips_bgtz(c,src1,offset) mips_format_i(c,7,src1,0,offset)
+#define mips_bgtzl(c,src1,offset) mips_format_i(c,23,src1,0,offset)
+#define mips_blez(c,src1,offset) mips_format_i(c,6,src1,0,offset)
+#define mips_blezl(c,src1,offset) mips_format_i(c,22,src1,0,offset)
+#define mips_bltz(c,src1,offset) mips_format_i(c,1,src1,0,offset)
+#define mips_bltzal(c,src1,offset) mips_format_i(c,1,src1,16,offset)
+#define mips_bltzall(c,src1,offset) mips_format_i(c,1,src1,18,offset)
+#define mips_bltzl(c,src1,offset) mips_format_i(c,1,src1,2,offset)
+#define mips_bne(c,src1,src2,offset) mips_format_i(c,5,src1,src2,offset)
+#define mips_bnel(c,src1,src2,offset) mips_format_i(c,21,src1,src2,offset)
+
+/* uncond branches and calls */
+#define mips_jump(c,target) mips_format_j(c,2,target)
+#define mips_jumpl(c,target) mips_format_j(c,3,target)
+#define mips_jalr(c,src1,retreg) mips_format_r(c,0,src1,0,retreg,0,9)
+#define mips_jr(c,src1) mips_emit32(c,((src1)<<21)|8)
+
+/* loads and stores */
+#define mips_lb(c,dest,base,offset) mips_format_i(c,32,base,dest,offset)
+#define mips_lbu(c,dest,base,offset) mips_format_i(c,36,base,dest,offset)
+#define mips_ld(c,dest,base,offset) mips_format_i(c,55,base,dest,offset)
+#define mips_ldl(c,dest,base,offset) mips_format_i(c,26,base,dest,offset)
+#define mips_ldr(c,dest,base,offset) mips_format_i(c,27,base,dest,offset)
+#define mips_lh(c,dest,base,offset) mips_format_i(c,33,base,dest,offset)
+#define mips_lhu(c,dest,base,offset) mips_format_i(c,37,base,dest,offset)
+#define mips_ll(c,dest,base,offset) mips_format_i(c,48,base,dest,offset)
+#define mips_lld(c,dest,base,offset) mips_format_i(c,52,base,dest,offset)
+#define mips_lui(c,dest,base,uimm) mips_format_i(c,15,base,dest,uimm)
+#define mips_lw(c,dest,base,offset) mips_format_i(c,35,base,dest,offset)
+#define mips_lwl(c,dest,base,offset) mips_format_i(c,34,base,dest,offset)
+#define mips_lwr(c,dest,base,offset) mips_format_i(c,38,base,dest,offset)
+#define mips_lwu(c,dest,base,offset) mips_format_i(c,39,base,dest,offset)
+
+#define mips_sb(c,src,base,offset) mips_format_i(c,40,base,src,offset)
+#define mips_sc(c,src,base,offset) mips_format_i(c,56,base,src,offset)
+#define mips_scd(c,src,base,offset) mips_format_i(c,60,base,src,offset)
+#define mips_sd(c,src,base,offset) mips_format_i(c,63,base,src,offset)
+#define mips_sdl(c,src,base,offset) mips_format_i(c,44,base,src,offset)
+#define mips_sdr(c,src,base,offset) mips_format_i(c,45,base,src,offset)
+#define mips_sh(c,src,base,offset) mips_format_i(c,41,base,src,offset)
+#define mips_sw(c,src,base,offset) mips_format_i(c,43,base,src,offset)
+#define mips_swl(c,src,base,offset) mips_format_i(c,50,base,src,offset)
+#define mips_swr(c,src,base,offset) mips_format_i(c,54,base,src,offset)
+
+/* misc and coprocessor ops */
+#define mips_move(c,dest,src) mips_addu(c,dest,src,mips_zero)
+#define mips_dmove(c,dest,src) mips_daddu(c,dest,src,mips_zero)
+#define mips_nop(c) mips_or(c,mips_at,mips_at,0)
+#define mips_break(c,code) mips_emit32(c, ((code)<<6)|13)
+#define mips_mfhi(c,dest) mips_format_r(c,0,0,0,dest,0,16)
+#define mips_mflo(c,dest) mips_format_r(c,0,0,0,dest,0,18)
+#define mips_mthi(c,src) mips_format_r(c,0,src,0,0,0,17)
+#define mips_mtlo(c,src) mips_format_r(c,0,src,0,0,0,19)
+#define mips_movn(c,dest,src,test) mips_format_r(c,0,src,test,dest,0,11)
+#define mips_movz(c,dest,src,test) mips_format_r(c,0,src,test,dest,0,10)
+#define mips_pref(c,hint,base,offset) mips_format_i(c,51,base,hint,offset)
+#define mips_prefidx(c,hint,base,idx) mips_format_r(c,19,base,idx,hint,0,15)
+#define mips_sync(c,stype) mips_emit32(c, ((stype)<<6)|15)
+#define mips_syscall(c,code) mips_emit32(c, ((code)<<6)|12)
+
+#define mips_cop(c,cop,fun) mips_emit32(c, ((16|(cop))<<26)|(fun))
+#define mips_ldc(c,cop,dest,base,offset) mips_format_i(c,(52|(cop)),base,dest,offset)
+#define mips_lwc(c,cop,dest,base,offset) mips_format_i(c,(48|(cop)),base,dest,offset)
+#define mips_sdc(c,cop,src,base,offset) mips_format_i(c,(60|(cop)),base,src,offset)
+#define mips_swc(c,cop,src,base,offset) mips_format_i(c,(56|(cop)),base,src,offset)
+#define mips_cfc1(c,dest,src) mips_format_r(c,17,2,dest,src,0,0)
+#define mips_ctc1(c,dest,src) mips_format_r(c,17,6,dest,src,0,0)
+
+/* fpu ops */
+#define mips_fabss(c,dest,src) mips_format_r(c,17,MIPS_FMT_SINGLE,0,src,dest,5)
+#define mips_fabsd(c,dest,src) mips_format_r(c,17,MIPS_FMT_DOUBLE,0,src,dest,5)
+#define mips_fadds(c,dest,src1,src2) mips_format_r(c,17,MIPS_FMT_SINGLE,src2,src1,dest,0)
+#define mips_faddd(c,dest,src1,src2) mips_format_r(c,17,MIPS_FMT_DOUBLE,src2,src1,dest,0)
+#define mips_fdivs(c,dest,src1,src2) mips_format_r(c,17,MIPS_FMT_SINGLE,src2,src1,dest,3)
+#define mips_fdivd(c,dest,src1,src2) mips_format_r(c,17,MIPS_FMT_DOUBLE,src2,src1,dest,3)
+#define mips_fmuls(c,dest,src1,src2) mips_format_r(c,17,MIPS_FMT_SINGLE,src2,src1,dest,2)
+#define mips_fmuld(c,dest,src1,src2) mips_format_r(c,17,MIPS_FMT_DOUBLE,src2,src1,dest,2)
+#define mips_fnegs(c,dest,src) mips_format_r(c,17,MIPS_FMT_SINGLE,0,src,dest,7)
+#define mips_fnegd(c,dest,src) mips_format_r(c,17,MIPS_FMT_DOUBLE,0,src,dest,7)
+#define mips_fsqrts(c,dest,src) mips_format_r(c,17,MIPS_FMT_SINGLE,0,src,dest,4)
+#define mips_fsqrtd(c,dest,src) mips_format_r(c,17,MIPS_FMT_DOUBLE,0,src,dest,4)
+#define mips_fsubs(c,dest,src1,src2) mips_format_r(c,17,MIPS_FMT_SINGLE,src2,src1,dest,1)
+#define mips_fsubd(c,dest,src1,src2) mips_format_r(c,17,MIPS_FMT_DOUBLE,src2,src1,dest,1)
+#define mips_madds(c,dest,src1,src2,srcadd) mips_format_r(c,19,srcadd,src2,src1,dest,32|MIPS_FMT_SINGLE)
+#define mips_maddd(c,dest,src1,src2,srcadd) mips_format_r(c,19,srcadd,src2,src1,dest,32|MIPS_FMT_DOUBLE)
+#define mips_nmadds(c,dest,src1,src2,srcadd) mips_format_r(c,19,srcadd,src2,src1,dest,48|MIPS_FMT_SINGLE)
+#define mips_nmaddd(c,dest,src1,src2,srcadd) mips_format_r(c,19,srcadd,src2,src1,dest,48|MIPS_FMT_DOUBLE)
+#define mips_msubs(c,dest,src1,src2,srcsub) mips_format_r(c,19,srcsub,src2,src1,dest,40|MIPS_FMT_SINGLE)
+#define mips_msubd(c,dest,src1,src2,srcsub) mips_format_r(c,19,srcsub,src2,src1,dest,40|MIPS_FMT_DOUBLE)
+#define mips_nmsubs(c,dest,src1,src2,srcsub) mips_format_r(c,19,srcsub,src2,src1,dest,56|MIPS_FMT_SINGLE)
+#define mips_nmsubd(c,dest,src1,src2,srcsub) mips_format_r(c,19,srcsub,src2,src1,dest,56|MIPS_FMT_DOUBLE)
+
+/* fp compare and branch */
+#define mips_fcmps(c,cond,src1,src2) mips_format_r(c,17,MIPS_FMT_SINGLE,src2,src1,0,(3<<4)|(cond))
+#define mips_fcmpd(c,cond,src1,src2) mips_format_r(c,17,MIPS_FMT_DOUBLE,src2,src1,0,(3<<4)|(cond))
+#define mips_fbfalse(c,offset) mips_format_i(c,17,8,0,offset)
+#define mips_fbfalsel(c,offset) mips_format_i(c,17,8,2,offset)
+#define mips_fbtrue(c,offset) mips_format_i(c,17,8,1,offset)
+#define mips_fbtruel(c,offset) mips_format_i(c,17,8,3,offset)
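+
+/* Usage sketch (illustrative; test.c below does the same): mips_fcmps and
+ * mips_fcmpd set the FPU condition flag, and the fb* macros then branch on
+ * it. "off" here is a placeholder branch displacement:
+ *
+ *   mips_fcmpd (p, MIPS_FPU_EQ, 18, 20);  // compare $f18 with $f20
+ *   mips_fbtrue (p, off);                 // taken when the flag is set
+ */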
+
+/* fp convert */
+#define mips_ceills(c,dest,src) mips_format_r(c,17,MIPS_FMT_SINGLE,0,src,dest,10)
+#define mips_ceilld(c,dest,src) mips_format_r(c,17,MIPS_FMT_DOUBLE,0,src,dest,10)
+#define mips_ceilws(c,dest,src) mips_format_r(c,17,MIPS_FMT_SINGLE,0,src,dest,14)
+#define mips_ceilwd(c,dest,src) mips_format_r(c,17,MIPS_FMT_DOUBLE,0,src,dest,14)
+#define mips_cvtds(c,dest,src) mips_format_r(c,17,MIPS_FMT_SINGLE,0,src,dest,33)
+#define mips_cvtdw(c,dest,src) mips_format_r(c,17,MIPS_FMT_WORD,0,src,dest,33)
+#define mips_cvtdl(c,dest,src) mips_format_r(c,17,MIPS_FMT_LONG,0,src,dest,33)
+#define mips_cvtls(c,dest,src) mips_format_r(c,17,MIPS_FMT_SINGLE,0,src,dest,37)
+#define mips_cvtld(c,dest,src) mips_format_r(c,17,MIPS_FMT_DOUBLE,0,src,dest,37)
+#define mips_cvtsd(c,dest,src) mips_format_r(c,17,MIPS_FMT_DOUBLE,0,src,dest,32)
+#define mips_cvtsw(c,dest,src) mips_format_r(c,17,MIPS_FMT_WORD,0,src,dest,32)
+#define mips_cvtsl(c,dest,src) mips_format_r(c,17,MIPS_FMT_LONG,0,src,dest,32)
+#define mips_cvtws(c,dest,src) mips_format_r(c,17,MIPS_FMT_SINGLE,0,src,dest,36)
+#define mips_cvtwd(c,dest,src) mips_format_r(c,17,MIPS_FMT_DOUBLE,0,src,dest,36)
+#define mips_floorls(c,dest,src) mips_format_r(c,17,MIPS_FMT_SINGLE,0,src,dest,11)
+#define mips_floorld(c,dest,src) mips_format_r(c,17,MIPS_FMT_DOUBLE,0,src,dest,11)
+#define mips_floorws(c,dest,src) mips_format_r(c,17,MIPS_FMT_SINGLE,0,src,dest,15)
+#define mips_floorwd(c,dest,src) mips_format_r(c,17,MIPS_FMT_DOUBLE,0,src,dest,15)
+#define mips_roundls(c,dest,src) mips_format_r(c,17,MIPS_FMT_SINGLE,0,src,dest,8)
+#define mips_roundld(c,dest,src) mips_format_r(c,17,MIPS_FMT_DOUBLE,0,src,dest,8)
+#define mips_roundws(c,dest,src) mips_format_r(c,17,MIPS_FMT_SINGLE,0,src,dest,12)
+#define mips_roundwd(c,dest,src) mips_format_r(c,17,MIPS_FMT_DOUBLE,0,src,dest,12)
+#define mips_truncls(c,dest,src) mips_format_r(c,17,MIPS_FMT_SINGLE,0,src,dest,9)
+#define mips_truncld(c,dest,src) mips_format_r(c,17,MIPS_FMT_DOUBLE,0,src,dest,9)
+#define mips_truncws(c,dest,src) mips_format_r(c,17,MIPS_FMT_SINGLE,0,src,dest,13)
+#define mips_truncwd(c,dest,src) mips_format_r(c,17,MIPS_FMT_DOUBLE,0,src,dest,13)
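+
+/* Naming note (my reading of the function codes above): mips_cvtXY
+ * converts *to* format X *from* format Y, so mips_cvtdw emits cvt.d.w
+ * (word -> double) and mips_cvtsd emits cvt.s.d (double -> single).
+ */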
+
+/* fp moves, loads */
+#define mips_fmovs(c,dest,src) mips_format_r(c,17,MIPS_FMT_SINGLE,0,src,dest,6)
+#define mips_fmovd(c,dest,src) mips_format_r(c,17,MIPS_FMT_DOUBLE,0,src,dest,6)
+#define mips_mfc1(c,dest,src) mips_format_r(c,17,0,dest,src,0,0)
+#define mips_mtc1(c,dest,src) mips_format_r(c,17,4,src,dest,0,0)
+#define mips_dmfc1(c,dest,src) mips_format_r(c,17,1,0,dest,src,0)
+#define mips_dmtc1(c,dest,src) mips_format_r(c,17,1,0,src,dest,0)
+#define mips_ldc1(c,dest,base,offset) mips_ldc(c,1,dest,base,offset)
+#define mips_ldxc1(c,dest,base,idx) mips_format_r(c,19,base,idx,0,dest,1)
+#define mips_lwc1(c,dest,base,offset) mips_lwc(c,1,dest,base,offset)
+#define mips_lwxc1(c,dest,base,idx) mips_format_r(c,19,base,idx,0,dest,0)
+#define mips_sdc1(c,src,base,offset) mips_sdc(c,1,src,base,offset)
+#define mips_sdxc1(c,src,base,idx) mips_format_r(c,19,base,idx,src,0,9)
+#define mips_swc1(c,src,base,offset) mips_swc(c,1,src,base,offset)
+#define mips_swxc1(c,src,base,idx) mips_format_r(c,19,base,idx,src,0,8)
+
+#endif /* __MIPS_CODEGEN_H__ */
+
diff --git a/lib/ffts/src/arch/mips/test.c b/lib/ffts/src/arch/mips/test.c
new file mode 100644
index 0000000..4f5e1ad
--- /dev/null
+++ b/lib/ffts/src/arch/mips/test.c
@@ -0,0 +1,159 @@
+#include "config.h"
+#include <stdlib.h>
+#include <string.h>
+
+#define NO_MIPS_JIT_DEBUG
+
+#include "mips-codegen.h"
+#include "mono/metadata/class.h"
+
+/* don't run the resulting program, it will destroy your computer;
+ * just objdump -d it to verify that we generated the correct assembly.
+ */
+
+int main (int argc, char *argv[]) {
+ guint32 *code, * p;
+
+ code = p = (guint32 *) malloc (sizeof (guint32) * 1024);
+
+ mips_add (p, 3, 4, 5);
+ mips_addi (p, 3, 4, 5);
+ mips_addu (p, 3, 4, 5);
+ mips_addiu (p, 3, 4, 5);
+ mips_sub (p, 3, 4, 5);
+ mips_subu (p, 3, 4, 5);
+ mips_dadd (p, 3, 4, 5);
+ mips_daddi (p, 3, 4, 5);
+ mips_daddu (p, 3, 4, 5);
+ mips_daddiu (p, 3, 4, 5);
+ mips_dsub (p, 3, 4, 5);
+ mips_dsubu (p, 3, 4, 5);
+
+ mips_mult (p, 6, 7);
+ mips_multu (p, 6, 7);
+ mips_div (p, 6, 7);
+ mips_divu (p, 6, 7);
+ mips_dmult (p, 6, 7);
+ mips_dmultu (p, 6, 7);
+ mips_ddiv (p, 6, 7);
+ mips_ddivu (p, 6, 7);
+
+ mips_sll (p, 3, 4, 5);
+ mips_sllv (p, 3, 4, 5);
+ mips_sra (p, 3, 4, 5);
+ mips_srav (p, 3, 4, 5);
+ mips_srl (p, 3, 4, 5);
+ mips_srlv (p, 3, 4, 5);
+ mips_dsll (p, 3, 4, 5);
+ mips_dsll32 (p, 3, 4, 5);
+ mips_dsllv (p, 3, 4, 5);
+ mips_dsra (p, 3, 4, 5);
+ mips_dsra32 (p, 3, 4, 5);
+ mips_dsrav (p, 3, 4, 5);
+ mips_dsrl (p, 3, 4, 5);
+ mips_dsrl32 (p, 3, 4, 5);
+ mips_dsrlv (p, 3, 4, 5);
+
+ mips_and (p, 8, 9, 10);
+ mips_andi (p, 8, 9, 10);
+ mips_nor (p, 8, 9, 10);
+ mips_or (p, 8, 9, 10);
+ mips_ori (p, 8, 9, 10);
+ mips_xor (p, 8, 9, 10);
+ mips_xori (p, 8, 9, 10);
+
+ mips_slt (p, 8, 9, 10);
+ mips_slti (p, 8, 9, 10);
+ mips_sltu (p, 8, 9, 10);
+ mips_sltiu (p, 8, 9, 10);
+
+ mips_beq (p, 8, 9, 0xff1f);
+ mips_beql (p, 8, 9, 0xff1f);
+ mips_bne (p, 8, 9, 0xff1f);
+ mips_bnel (p, 8, 9, 0xff1f);
+ mips_bgez (p, 11, 0xff1f);
+ mips_bgezal (p, 11, 0xff1f);
+ mips_bgezall (p, 11, 0xff1f);
+ mips_bgezl (p, 11, 0xff1f);
+ mips_bgtz (p, 11, 0xff1f);
+ mips_bgtzl (p, 11, 0xff1f);
+ mips_blez (p, 11, 0xff1f);
+ mips_blezl (p, 11, 0xff1f);
+ mips_bltz (p, 11, 0xff1f);
+ mips_bltzal (p, 11, 0xff1f);
+ mips_bltzall (p, 11, 0xff1f);
+ mips_bltzl (p, 11, 0xff1f);
+
+ mips_jump (p, 0xff1f);
+ mips_jumpl (p, 0xff1f);
+ mips_jalr (p, 12, mips_ra);
+ mips_jr (p, 12);
+
+ mips_lb (p, 13, 14, 128);
+ mips_lbu (p, 13, 14, 128);
+ mips_ld (p, 13, 14, 128);
+ mips_ldl (p, 13, 14, 128);
+ mips_ldr (p, 13, 14, 128);
+ mips_lh (p, 13, 14, 128);
+ mips_lhu (p, 13, 14, 128);
+ mips_ll (p, 13, 14, 128);
+ mips_lld (p, 13, 14, 128);
+ mips_lui (p, 13, 14, 128);
+ mips_lw (p, 13, 14, 128);
+ mips_lwl (p, 13, 14, 128);
+ mips_lwr (p, 13, 14, 128);
+ mips_lwu (p, 13, 14, 128);
+ mips_sb (p, 13, 14, 128);
+ mips_sc (p, 13, 14, 128);
+ mips_scd (p, 13, 14, 128);
+ mips_sd (p, 13, 14, 128);
+ mips_sdl (p, 13, 14, 128);
+ mips_sdr (p, 13, 14, 128);
+ mips_sh (p, 13, 14, 128);
+ mips_sw (p, 13, 14, 128);
+ mips_swl (p, 13, 14, 128);
+ mips_swr (p, 13, 14, 128);
+
+ mips_move (p, 15, 16);
+ mips_nop (p);
+ mips_break (p, 0);
+ mips_sync (p, 0);
+ mips_mfhi (p, 17);
+ mips_mflo (p, 17);
+ mips_mthi (p, 17);
+ mips_mtlo (p, 17);
+
+ mips_fabsd (p, 16, 18);
+ mips_fnegd (p, 16, 18);
+ mips_fsqrtd (p, 16, 18);
+ mips_faddd (p, 16, 18, 20);
+ mips_fdivd (p, 16, 18, 20);
+ mips_fmuld (p, 16, 18, 20);
+ mips_fsubd (p, 16, 18, 20);
+
+ mips_fcmpd (p, MIPS_FPU_EQ, 18, 20);
+ mips_fbfalse (p, 0xff1f);
+ mips_fbfalsel (p, 0xff1f);
+ mips_fbtrue (p, 0xff1f);
+ mips_fbtruel (p, 0xff1f);
+
+ mips_ceilwd (p, 20, 22);
+ mips_ceilld (p, 20, 22);
+ mips_floorwd (p, 20, 22);
+ mips_floorld (p, 20, 22);
+ mips_roundwd (p, 20, 22);
+ mips_roundld (p, 20, 22);
+ mips_truncwd (p, 20, 22);
+ mips_truncld (p, 20, 22);
+ mips_cvtdw (p, 20, 22);
+ mips_cvtds (p, 20, 22);
+ mips_cvtdl (p, 20, 22);
+ mips_cvtld (p, 20, 22);
+ mips_cvtsd (p, 20, 22);
+ mips_cvtwd (p, 20, 22);
+
+ mips_fmovd (p, 20, 22);
+ printf ("size: %d\n", p - code);
+
+ return 0;
+}
diff --git a/lib/ffts/src/arch/ppc/.gitignore b/lib/ffts/src/arch/ppc/.gitignore
new file mode 100644
index 0000000..c577ff6
--- /dev/null
+++ b/lib/ffts/src/arch/ppc/.gitignore
@@ -0,0 +1,7 @@
+/Makefile
+/Makefile.in
+/.libs
+/.deps
+/*.la
+/*.lo
+/test
diff --git a/lib/ffts/src/arch/ppc/Makefile.am b/lib/ffts/src/arch/ppc/Makefile.am
new file mode 100644
index 0000000..9b209ef
--- /dev/null
+++ b/lib/ffts/src/arch/ppc/Makefile.am
@@ -0,0 +1 @@
+EXTRA_DIST = ppc-codegen.h
\ No newline at end of file
diff --git a/lib/ffts/src/arch/ppc/ppc-codegen.h b/lib/ffts/src/arch/ppc/ppc-codegen.h
new file mode 100644
index 0000000..55b5060
--- /dev/null
+++ b/lib/ffts/src/arch/ppc/ppc-codegen.h
@@ -0,0 +1,953 @@
+/*
+ Authors:
+ Radek Doulik
+ Christopher Taylor <ct_AT_clemson_DOT_edu>
+ Andreas Faerber <andreas.faerber@web.de>
+
+ Copyright (C) 2001 Radek Doulik
+ Copyright (C) 2007-2008 Andreas Faerber
+
+ for testing, do the following: ./test | as -o test.o
+*/
+
+#ifndef __MONO_PPC_CODEGEN_H__
+#define __MONO_PPC_CODEGEN_H__
+#include <glib.h>
+#include <assert.h>
+
+typedef enum {
+ ppc_r0 = 0,
+ ppc_r1,
+ ppc_sp = ppc_r1,
+ ppc_r2,
+ ppc_r3,
+ ppc_r4,
+ ppc_r5,
+ ppc_r6,
+ ppc_r7,
+ ppc_r8,
+ ppc_r9,
+ ppc_r10,
+ ppc_r11,
+ ppc_r12,
+ ppc_r13,
+ ppc_r14,
+ ppc_r15,
+ ppc_r16,
+ ppc_r17,
+ ppc_r18,
+ ppc_r19,
+ ppc_r20,
+ ppc_r21,
+ ppc_r22,
+ ppc_r23,
+ ppc_r24,
+ ppc_r25,
+ ppc_r26,
+ ppc_r27,
+ ppc_r28,
+ ppc_r29,
+ ppc_r30,
+ ppc_r31
+} PPCIntRegister;
+
+typedef enum {
+ ppc_f0 = 0,
+ ppc_f1,
+ ppc_f2,
+ ppc_f3,
+ ppc_f4,
+ ppc_f5,
+ ppc_f6,
+ ppc_f7,
+ ppc_f8,
+ ppc_f9,
+ ppc_f10,
+ ppc_f11,
+ ppc_f12,
+ ppc_f13,
+ ppc_f14,
+ ppc_f15,
+ ppc_f16,
+ ppc_f17,
+ ppc_f18,
+ ppc_f19,
+ ppc_f20,
+ ppc_f21,
+ ppc_f22,
+ ppc_f23,
+ ppc_f24,
+ ppc_f25,
+ ppc_f26,
+ ppc_f27,
+ ppc_f28,
+ ppc_f29,
+ ppc_f30,
+ ppc_f31
+} PPCFloatRegister;
+
+typedef enum {
+ ppc_lr = 256,
+ ppc_ctr = 256 + 32,
+ ppc_xer = 32
+} PPCSpecialRegister;
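+
+/* Note (my reading of the mfspr/mtspr encodings below): these values are
+ * the SPR numbers with their two 5-bit halves already swapped into the
+ * split spr field, e.g. LR is SPR 8, encoded as 8 << 5 == 256, CTR is
+ * SPR 9 (9 << 5 == 288) and XER is SPR 1 (1 << 5 == 32).
+ */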
+
+enum {
+ /* B0 operand for branches */
+ PPC_BR_DEC_CTR_NONZERO_FALSE = 0,
+ PPC_BR_LIKELY = 1, /* can be or'ed with the conditional variants */
+ PPC_BR_DEC_CTR_ZERO_FALSE = 2,
+ PPC_BR_FALSE = 4,
+ PPC_BR_DEC_CTR_NONZERO_TRUE = 8,
+ PPC_BR_DEC_CTR_ZERO_TRUE = 10,
+ PPC_BR_TRUE = 12,
+ PPC_BR_DEC_CTR_NONZERO = 16,
+ PPC_BR_DEC_CTR_ZERO = 18,
+ PPC_BR_ALWAYS = 20,
+ /* B1 operand for branches */
+ PPC_BR_LT = 0,
+ PPC_BR_GT = 1,
+ PPC_BR_EQ = 2,
+ PPC_BR_SO = 3
+};
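+
+/* Usage sketch (illustrative): BO selects the branch semantics and BI the
+ * CR bit to test (4*crf + bit), so a "branch if cr0 equal" with a
+ * placeholder displacement d would be:
+ *
+ *   ppc_bc (code, PPC_BR_TRUE, PPC_BR_EQ, d);
+ */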
+
+enum {
+ PPC_TRAP_LT = 1,
+ PPC_TRAP_GT = 2,
+ PPC_TRAP_EQ = 4,
+ PPC_TRAP_LT_UN = 8,
+ PPC_TRAP_GT_UN = 16,
+ PPC_TRAP_LE = 1 + PPC_TRAP_EQ,
+ PPC_TRAP_GE = 2 + PPC_TRAP_EQ,
+ PPC_TRAP_LE_UN = 8 + PPC_TRAP_EQ,
+ PPC_TRAP_GE_UN = 16 + PPC_TRAP_EQ
+};
+
+#define ppc_emit32(c,x) do { *((guint32 *) (c)) = GUINT32_TO_BE (x); (c) = (gpointer)((guint8 *)(c) + sizeof (guint32));} while (0)
+
+#define ppc_is_imm16(val) ((((val)>> 15) == 0) || (((val)>> 15) == -1))
+#define ppc_is_uimm16(val) ((glong)(val) >= 0L && (glong)(val) <= 65535L)
+#define ppc_ha(val) (((val >> 16) + ((val & 0x8000) ? 1 : 0)) & 0xffff)
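+/* Example (illustrative): ppc_ha computes the high half adjusted for the
+ * sign extension of a following 16-bit low half, so for 0x00018000 it
+ * yields 0x0002, since addis r,r,0x0002 followed by addi r,r,-0x8000
+ * reconstructs 0x00018000.
+ */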
+
+#define ppc_load32(c,D,v) G_STMT_START { \
+ ppc_lis ((c), (D), (guint32)(v) >> 16); \
+ ppc_ori ((c), (D), (D), (guint32)(v) & 0xffff); \
+ } G_STMT_END
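+
+/* Example (illustrative): ppc_load32 (code, ppc_r5, 0xdeadbeef) expands to
+ *   lis r5, 0xdead     ; r5 = 0xdead0000
+ *   ori r5, r5, 0xbeef ; r5 = 0xdeadbeef
+ */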
+
+/* Macros to load/store pointer sized quantities */
+
+#if defined(__mono_ppc64__) && !defined(__mono_ilp32__)
+
+#define ppc_ldptr(c,D,d,A) ppc_ld ((c), (D), (d), (A))
+#define ppc_ldptr_update(c,D,d,A) ppc_ldu ((c), (D), (d), (A))
+#define ppc_ldptr_indexed(c,D,A,B) ppc_ldx ((c), (D), (A), (B))
+#define ppc_ldptr_update_indexed(c,D,A,B) ppc_ldux ((c), (D), (A), (B))
+
+#define ppc_stptr(c,S,d,A) ppc_std ((c), (S), (d), (A))
+#define ppc_stptr_update(c,S,d,A) ppc_stdu ((c), (S), (d), (A))
+#define ppc_stptr_indexed(c,S,A,B) ppc_stdx ((c), (S), (A), (B))
+#define ppc_stptr_update_indexed(c,S,A,B) ppc_stdux ((c), (S), (A), (B))
+
+#else
+
+/* Same as ppc32 */
+#define ppc_ldptr(c,D,d,A) ppc_lwz ((c), (D), (d), (A))
+#define ppc_ldptr_update(c,D,d,A) ppc_lwzu ((c), (D), (d), (A))
+#define ppc_ldptr_indexed(c,D,A,B) ppc_lwzx ((c), (D), (A), (B))
+#define ppc_ldptr_update_indexed(c,D,A,B) ppc_lwzux ((c), (D), (A), (B))
+
+#define ppc_stptr(c,S,d,A) ppc_stw ((c), (S), (d), (A))
+#define ppc_stptr_update(c,S,d,A) ppc_stwu ((c), (S), (d), (A))
+#define ppc_stptr_indexed(c,S,A,B) ppc_stwx ((c), (S), (A), (B))
+#define ppc_stptr_update_indexed(c,S,A,B) ppc_stwux ((c), (S), (A), (B))
+
+#endif
+
+/* Macros to load pointer sized immediates */
+#define ppc_load_ptr(c,D,v) ppc_load ((c),(D),(gsize)(v))
+#define ppc_load_ptr_sequence(c,D,v) ppc_load_sequence ((c),(D),(gsize)(v))
+
+/* Macros to load/store regsize quantities */
+
+#ifdef __mono_ppc64__
+#define ppc_ldr(c,D,d,A) ppc_ld ((c), (D), (d), (A))
+#define ppc_ldr_indexed(c,D,A,B) ppc_ldx ((c), (D), (A), (B))
+#define ppc_str(c,S,d,A) ppc_std ((c), (S), (d), (A))
+#define ppc_str_update(c,S,d,A) ppc_stdu ((c), (S), (d), (A))
+#define ppc_str_indexed(c,S,A,B) ppc_stdx ((c), (S), (A), (B))
+#define ppc_str_update_indexed(c,S,A,B) ppc_stdux ((c), (S), (A), (B))
+#else
+#define ppc_ldr(c,D,d,A) ppc_lwz ((c), (D), (d), (A))
+#define ppc_ldr_indexed(c,D,A,B) ppc_lwzx ((c), (D), (A), (B))
+#define ppc_str(c,S,d,A) ppc_stw ((c), (S), (d), (A))
+#define ppc_str_update(c,S,d,A) ppc_stwu ((c), (S), (d), (A))
+#define ppc_str_indexed(c,S,A,B) ppc_stwx ((c), (S), (A), (B))
+#define ppc_str_update_indexed(c,S,A,B) ppc_stwux ((c), (S), (A), (B))
+#endif
+
+#define ppc_str_multiple(c,S,d,A) ppc_store_multiple_regs((c),(S),(d),(A))
+#define ppc_ldr_multiple(c,D,d,A) ppc_load_multiple_regs((c),(D),(d),(A))
+
+/* PPC32 macros */
+
+#ifndef __mono_ppc64__
+
+#define ppc_load_sequence(c,D,v) ppc_load32 ((c), (D), (guint32)(v))
+
+#define PPC_LOAD_SEQUENCE_LENGTH 8
+
+#define ppc_load(c,D,v) G_STMT_START { \
+ if (ppc_is_imm16 ((guint32)(v))) { \
+ ppc_li ((c), (D), (guint16)(guint32)(v)); \
+ } else { \
+ ppc_load32 ((c), (D), (guint32)(v)); \
+ } \
+ } G_STMT_END
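+
+/* Example (illustrative): ppc_load picks the shortest sequence, so
+ * ppc_load (code, ppc_r4, 42) emits a single li, while
+ * ppc_load (code, ppc_r4, 0x12345678) falls back to the lis/ori pair
+ * of ppc_load32.
+ */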
+
+#define ppc_load_func(c,D,V) ppc_load_sequence ((c), (D), (V))
+
+#define ppc_load_multiple_regs(c,D,d,A) ppc_lmw ((c), (D), (d), (A))
+
+#define ppc_store_multiple_regs(c,S,d,A) ppc_stmw ((c), (S), (d), (A))
+
+#define ppc_compare(c,cfrD,A,B) ppc_cmp((c), (cfrD), 0, (A), (B))
+#define ppc_compare_reg_imm(c,cfrD,A,B) ppc_cmpi((c), (cfrD), 0, (A), (B))
+#define ppc_compare_log(c,cfrD,A,B) ppc_cmpl((c), (cfrD), 0, (A), (B))
+
+#define ppc_shift_left(c,A,S,B) ppc_slw((c), (S), (A), (B))
+#define ppc_shift_left_imm(c,A,S,n) ppc_slwi((c), (A), (S), (n))
+
+#define ppc_shift_right_imm(c,A,S,B) ppc_srwi((c), (A), (S), (B))
+#define ppc_shift_right_arith_imm(c,A,S,B) ppc_srawi((c), (A), (S), (B))
+
+#define ppc_multiply(c,D,A,B) ppc_mullw((c), (D), (A), (B))
+
+#define ppc_clear_right_imm(c,A,S,n) ppc_clrrwi((c), (A), (S), (n))
+
+#endif
+
+#define ppc_opcode(c) ((c) >> 26)
+#define ppc_split_5_1_1(x) (((x) >> 5) & 0x1)
+#define ppc_split_5_1_5(x) ((x) & 0x1F)
+#define ppc_split_5_1(x) ((ppc_split_5_1_5(x) << 1) | ppc_split_5_1_1(x))
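+
+/* Worked example (illustrative): the 6-bit MB/ME fields of the 64-bit
+ * rotate instructions are encoded low-five-bits-first, so for MB = 34
+ * (0b100010) ppc_split_5_1 gives (0b00010 << 1) | 1 == 5, which the
+ * rld* macros below then shift into place.
+ */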
+
+#define ppc_break(c) ppc_tw((c),31,0,0)
+#define ppc_addi(c,D,A,i) ppc_emit32 (c, (14 << 26) | ((D) << 21) | ((A) << 16) | (guint16)(i))
+#define ppc_addis(c,D,A,i) ppc_emit32 (c, (15 << 26) | ((D) << 21) | ((A) << 16) | (guint16)(i))
+#define ppc_li(c,D,v) ppc_addi (c, D, 0, (guint16)(v))
+#define ppc_lis(c,D,v) ppc_addis (c, D, 0, (guint16)(v))
+#define ppc_lwz(c,D,d,A) ppc_emit32 (c, (32 << 26) | ((D) << 21) | ((A) << 16) | (guint16)(d))
+#define ppc_lhz(c,D,d,A) ppc_emit32 (c, (40 << 26) | ((D) << 21) | ((A) << 16) | (guint16)(d))
+#define ppc_lbz(c,D,d,A) ppc_emit32 (c, (34 << 26) | ((D) << 21) | ((A) << 16) | (guint16)(d))
+#define ppc_stw(c,S,d,A) ppc_emit32 (c, (36 << 26) | ((S) << 21) | ((A) << 16) | (guint16)(d))
+#define ppc_sth(c,S,d,A) ppc_emit32 (c, (44 << 26) | ((S) << 21) | ((A) << 16) | (guint16)(d))
+#define ppc_stb(c,S,d,A) ppc_emit32 (c, (38 << 26) | ((S) << 21) | ((A) << 16) | (guint16)(d))
+#define ppc_stwu(c,s,d,A) ppc_emit32 (c, (37 << 26) | ((s) << 21) | ((A) << 16) | (guint16)(d))
+#define ppc_or(c,a,s,b) ppc_emit32 (c, (31 << 26) | ((s) << 21) | ((a) << 16) | ((b) << 11) | 888)
+#define ppc_mr(c,a,s) ppc_or (c, a, s, s)
+#define ppc_ori(c,S,A,ui) ppc_emit32 (c, (24 << 26) | ((S) << 21) | ((A) << 16) | (guint16)(ui))
+#define ppc_nop(c) ppc_ori (c, 0, 0, 0)
+#define ppc_mfspr(c,D,spr) ppc_emit32 (c, (31 << 26) | ((D) << 21) | ((spr) << 11) | (339 << 1))
+#define ppc_mflr(c,D) ppc_mfspr (c, D, ppc_lr)
+#define ppc_mtspr(c,spr,S) ppc_emit32 (c, (31 << 26) | ((S) << 21) | ((spr) << 11) | (467 << 1))
+#define ppc_mtlr(c,S) ppc_mtspr (c, ppc_lr, S)
+#define ppc_mtctr(c,S) ppc_mtspr (c, ppc_ctr, S)
+#define ppc_mtxer(c,S) ppc_mtspr (c, ppc_xer, S)
+
+#define ppc_b(c,li) ppc_emit32 (c, (18 << 26) | ((li) << 2))
+#define ppc_bl(c,li) ppc_emit32 (c, (18 << 26) | ((li) << 2) | 1)
+#define ppc_ba(c,li) ppc_emit32 (c, (18 << 26) | ((li) << 2) | 2)
+#define ppc_bla(c,li) ppc_emit32 (c, (18 << 26) | ((li) << 2) | 3)
+#define ppc_blrl(c) ppc_emit32 (c, 0x4e800021)
+#define ppc_blr(c) ppc_emit32 (c, 0x4e800020)
+
+#define ppc_lfs(c,D,d,A) ppc_emit32 (c, (48 << 26) | ((D) << 21) | ((A) << 16) | (guint16)(d))
+#define ppc_lfd(c,D,d,A) ppc_emit32 (c, (50 << 26) | ((D) << 21) | ((A) << 16) | (guint16)(d))
+#define ppc_stfs(c,S,d,a) ppc_emit32 (c, (52 << 26) | ((S) << 21) | ((a) << 16) | (guint16)(d))
+#define ppc_stfd(c,S,d,a) ppc_emit32 (c, (54 << 26) | ((S) << 21) | ((a) << 16) | (guint16)(d))
+
+/***********************************************************************
+The macros below were tapped out by Christopher Taylor <ct_AT_clemson_DOT_edu>
+from 18 November 2002 to 19 December 2002.
+
+Special thanks to rodo, lupus, dietmar, miguel, and duncan for their
+patience and motivation.
+
+The macros found in this file are based on the assembler instructions found
+in Motorola and Digital DNA's:
+
+"Programming Enviornments Manual For 32-bit Implementations of the PowerPC Architecture"
+
+MPCFPE32B/AD
+12/2001
+REV2
+
+see pages 326 - 524 for detailed information regarding each instruction
+
+Also see the "Ximian Copyright Agreement, 2002" for more information regarding
+my and Ximian's copyright to this code. ;)
+*************************************************************************/
+
+#define ppc_addx(c,D,A,B,OE,Rc) ppc_emit32(c, (31 << 26) | ((D) << 21) | ((A) << 16) | ((B) << 11) | (OE << 10) | (266 << 1) | Rc)
+#define ppc_add(c,D,A,B) ppc_addx(c,D,A,B,0,0)
+#define ppc_addd(c,D,A,B) ppc_addx(c,D,A,B,0,1)
+#define ppc_addo(c,D,A,B) ppc_addx(c,D,A,B,1,0)
+#define ppc_addod(c,D,A,B) ppc_addx(c,D,A,B,1,1)
+
+#define ppc_addcx(c,D,A,B,OE,Rc) ppc_emit32(c, (31 << 26) | ((D) << 21) | ((A) << 16) | ((B) << 11) | (OE << 10) | (10 << 1) | Rc)
+#define ppc_addc(c,D,A,B) ppc_addcx(c,D,A,B,0,0)
+#define ppc_addcd(c,D,A,B) ppc_addcx(c,D,A,B,0,1)
+#define ppc_addco(c,D,A,B) ppc_addcx(c,D,A,B,1,0)
+#define ppc_addcod(c,D,A,B) ppc_addcx(c,D,A,B,1,1)
+
+#define ppc_addex(c,D,A,B,OE,Rc) ppc_emit32(c, (31 << 26) | ((D) << 21) | ((A) << 16) | ((B) << 11) | (OE << 10) | (138 << 1) | Rc)
+#define ppc_adde(c,D,A,B) ppc_addex(c,D,A,B,0,0)
+#define ppc_added(c,D,A,B) ppc_addex(c,D,A,B,0,1)
+#define ppc_addeo(c,D,A,B) ppc_addex(c,D,A,B,1,0)
+#define ppc_addeod(c,D,A,B) ppc_addex(c,D,A,B,1,1)
+
+#define ppc_addic(c,D,A,i) ppc_emit32(c, (12 << 26) | ((D) << 21) | ((A) << 16) | (guint16)(i))
+#define ppc_addicd(c,D,A,i) ppc_emit32(c, (13 << 26) | ((D) << 21) | ((A) << 16) | (guint16)(i))
+
+#define ppc_addmex(c,D,A,OE,RC) ppc_emit32(c, (31 << 26) | ((D) << 21 ) | ((A) << 16) | (0 << 11) | ((OE) << 10) | (234 << 1) | RC)
+#define ppc_addme(c,D,A) ppc_addmex(c,D,A,0,0)
+#define ppc_addmed(c,D,A) ppc_addmex(c,D,A,0,1)
+#define ppc_addmeo(c,D,A) ppc_addmex(c,D,A,1,0)
+#define ppc_addmeod(c,D,A) ppc_addmex(c,D,A,1,1)
+
+#define ppc_addzex(c,D,A,OE,RC) ppc_emit32(c, (31 << 26) | ((D) << 21 ) | ((A) << 16) | (0 << 11) | ((OE) << 10) | (202 << 1) | RC)
+#define ppc_addze(c,D,A) ppc_addzex(c,D,A,0,0)
+#define ppc_addzed(c,D,A) ppc_addzex(c,D,A,0,1)
+#define ppc_addzeo(c,D,A) ppc_addzex(c,D,A,1,0)
+#define ppc_addzeod(c,D,A) ppc_addzex(c,D,A,1,1)
+
+#define ppc_andx(c,S,A,B,RC) ppc_emit32(c, (31 << 26) | ((S) << 21 ) | ((A) << 16) | ((B) << 11) | (28 << 1) | RC)
+#define ppc_and(c,S,A,B) ppc_andx(c,S,A,B,0)
+#define ppc_andd(c,S,A,B) ppc_andx(c,S,A,B,1)
+
+#define ppc_andcx(c,S,A,B,RC) ppc_emit32(c, (31 << 26) | ((S) << 21 ) | ((A) << 16) | ((B) << 11) | (60 << 1) | RC)
+#define ppc_andc(c,S,A,B) ppc_andcx(c,S,A,B,0)
+#define ppc_andcd(c,S,A,B) ppc_andcx(c,S,A,B,1)
+
+#define ppc_andid(c,S,A,ui) ppc_emit32(c, (28 << 26) | ((S) << 21 ) | ((A) << 16) | ((guint16)(ui)))
+#define ppc_andisd(c,S,A,ui) ppc_emit32(c, (29 << 26) | ((S) << 21 ) | ((A) << 16) | ((guint16)(ui)))
+
+#define ppc_bcx(c,BO,BI,BD,AA,LK) ppc_emit32(c, (16 << 26) | (BO << 21 )| (BI << 16) | (BD << 2) | ((AA) << 1) | LK)
+#define ppc_bc(c,BO,BI,BD) ppc_bcx(c,BO,BI,BD,0,0)
+#define ppc_bca(c,BO,BI,BD) ppc_bcx(c,BO,BI,BD,1,0)
+#define ppc_bcl(c,BO,BI,BD) ppc_bcx(c,BO,BI,BD,0,1)
+#define ppc_bcla(c,BO,BI,BD) ppc_bcx(c,BO,BI,BD,1,1)
+
+#define ppc_bcctrx(c,BO,BI,LK) ppc_emit32(c, (19 << 26) | (BO << 21 )| (BI << 16) | (0 << 11) | (528 << 1) | LK)
+#define ppc_bcctr(c,BO,BI) ppc_bcctrx(c,BO,BI,0)
+#define ppc_bcctrl(c,BO,BI) ppc_bcctrx(c,BO,BI,1)
+
+#define ppc_bnectrp(c,BO,BI) ppc_bcctr(c,BO,BI)
+#define ppc_bnectrlp(c,BO,BI) ppc_bcctr(c,BO,BI)
+
+#define ppc_bclrx(c,BO,BI,BH,LK) ppc_emit32(c, (19 << 26) | ((BO) << 21 )| ((BI) << 16) | (0 << 13) | ((BH) << 11) | (16 << 1) | (LK))
+#define ppc_bclr(c,BO,BI,BH) ppc_bclrx(c,BO,BI,BH,0)
+#define ppc_bclrl(c,BO,BI,BH) ppc_bclrx(c,BO,BI,BH,1)
+
+#define ppc_bnelrp(c,BO,BI) ppc_bclr(c,BO,BI,0)
+#define ppc_bnelrlp(c,BO,BI) ppc_bclr(c,BO,BI,0)
+
+#define ppc_cmp(c,cfrD,L,A,B) ppc_emit32(c, (31 << 26) | ((cfrD) << 23) | (0 << 22) | ((L) << 21) | ((A) << 16) | ((B) << 11) | (0 << 1) | 0)
+#define ppc_cmpi(c,cfrD,L,A,B) ppc_emit32(c, (11 << 26) | (cfrD << 23) | (0 << 22) | (L << 21) | (A << 16) | (guint16)(B))
+#define ppc_cmpl(c,cfrD,L,A,B) ppc_emit32(c, (31 << 26) | ((cfrD) << 23) | (0 << 22) | ((L) << 21) | ((A) << 16) | ((B) << 11) | (32 << 1) | 0)
+#define ppc_cmpli(c,cfrD,L,A,B) ppc_emit32(c, (10 << 26) | (cfrD << 23) | (0 << 22) | (L << 21) | (A << 16) | (guint16)(B))
+#define ppc_cmpw(c,cfrD,A,B) ppc_cmp(c, (cfrD), 0, (A), (B))
+
+#define ppc_cntlzwx(c,S,A,Rc) ppc_emit32(c, (31 << 26) | (S << 21) | (A << 16) | (0 << 11) | (26 << 1) | Rc)
+#define ppc_cntlzw(c,S,A) ppc_cntlzwx(c,S,A,0)
+#define ppc_cntlzwd(c,S,A) ppc_cntlzwx(c,S,A,1)
+
+#define ppc_crand(c,D,A,B) ppc_emit32(c, (19 << 26) | (D << 21) | (A << 16) | (B << 11) | (257 << 1) | 0)
+#define ppc_crandc(c,D,A,B) ppc_emit32(c, (19 << 26) | (D << 21) | (A << 16) | (B << 11) | (129 << 1) | 0)
+#define ppc_creqv(c,D,A,B) ppc_emit32(c, (19 << 26) | (D << 21) | (A << 16) | (B << 11) | (289 << 1) | 0)
+#define ppc_crnand(c,D,A,B) ppc_emit32(c, (19 << 26) | (D << 21) | (A << 16) | (B << 11) | (225 << 1) | 0)
+#define ppc_crnor(c,D,A,B) ppc_emit32(c, (19 << 26) | (D << 21) | (A << 16) | (B << 11) | (33 << 1) | 0)
+#define ppc_cror(c,D,A,B) ppc_emit32(c, (19 << 26) | (D << 21) | (A << 16) | (B << 11) | (449 << 1) | 0)
+#define ppc_crorc(c,D,A,B) ppc_emit32(c, (19 << 26) | (D << 21) | (A << 16) | (B << 11) | (417 << 1) | 0)
+#define ppc_crxor(c,D,A,B) ppc_emit32(c, (19 << 26) | (D << 21) | (A << 16) | (B << 11) | (193 << 1) | 0)
+
+#define ppc_dcba(c,A,B) ppc_emit32(c, (31 << 26) | (0 << 21) | (A << 16) | (B << 11) | (758 << 1) | 0)
+#define ppc_dcbf(c,A,B) ppc_emit32(c, (31 << 26) | (0 << 21) | (A << 16) | (B << 11) | (86 << 1) | 0)
+#define ppc_dcbi(c,A,B) ppc_emit32(c, (31 << 26) | (0 << 21) | (A << 16) | (B << 11) | (470 << 1) | 0)
+#define ppc_dcbst(c,A,B) ppc_emit32(c, (31 << 26) | (0 << 21) | (A << 16) | (B << 11) | (54 << 1) | 0)
+#define ppc_dcbt(c,A,B) ppc_emit32(c, (31 << 26) | (0 << 21) | (A << 16) | (B << 11) | (278 << 1) | 0)
+#define ppc_dcbtst(c,A,B) ppc_emit32(c, (31 << 26) | (0 << 21) | (A << 16) | (B << 11) | (246 << 1) | 0)
+#define ppc_dcbz(c,A,B) ppc_emit32(c, (31 << 26) | (0 << 21) | (A << 16) | (B << 11) | (1014 << 1) | 0)
+
+#define ppc_divwx(c,D,A,B,OE,Rc) ppc_emit32(c, (31 << 26) | (D << 21) | (A << 16) | (B << 11) | (OE << 10) | (491 << 1) | Rc)
+#define ppc_divw(c,D,A,B) ppc_divwx(c,D,A,B,0,0)
+#define ppc_divwd(c,D,A,B) ppc_divwx(c,D,A,B,0,1)
+#define ppc_divwo(c,D,A,B) ppc_divwx(c,D,A,B,1,0)
+#define ppc_divwod(c,D,A,B) ppc_divwx(c,D,A,B,1,1)
+
+#define ppc_divwux(c,D,A,B,OE,Rc) ppc_emit32(c, (31 << 26) | (D << 21) | (A << 16) | (B << 11) | (OE << 10) | (459 << 1) | Rc)
+#define ppc_divwu(c,D,A,B) ppc_divwux(c,D,A,B,0,0)
+#define ppc_divwud(c,D,A,B) ppc_divwux(c,D,A,B,0,1)
+#define ppc_divwuo(c,D,A,B) ppc_divwux(c,D,A,B,1,0)
+#define ppc_divwuod(c,D,A,B) ppc_divwux(c,D,A,B,1,1)
+
+#define ppc_eciwx(c,D,A,B) ppc_emit32(c, (31 << 26) | (D << 21) | (A << 16) | (B << 11) | (310 << 1) | 0)
+#define ppc_ecowx(c,S,A,B) ppc_emit32(c, (31 << 26) | (S << 21) | (A << 16) | (B << 11) | (438 << 1) | 0)
+#define ppc_eieio(c) ppc_emit32(c, (31 << 26) | (0 << 21) | (0 << 16) | (0 << 11) | (854 << 1) | 0)
+
+#define ppc_eqvx(c,A,S,B,Rc) ppc_emit32(c, (31 << 26) | (S << 21) | (A << 16) | (B << 11) | (284 << 1) | Rc)
+#define ppc_eqv(c,A,S,B) ppc_eqvx(c,A,S,B,0)
+#define ppc_eqvd(c,A,S,B) ppc_eqvx(c,A,S,B,1)
+
+#define ppc_extsbx(c,A,S,Rc) ppc_emit32(c, (31 << 26) | (S << 21) | (A << 16) | (0 << 11) | (954 << 1) | Rc)
+#define ppc_extsb(c,A,S) ppc_extsbx(c,A,S,0)
+#define ppc_extsbd(c,A,S) ppc_extsbx(c,A,S,1)
+
+#define ppc_extshx(c,A,S,Rc) ppc_emit32(c, (31 << 26) | (S << 21) | (A << 16) | (0 << 11) | (922 << 1) | Rc)
+#define ppc_extsh(c,A,S) ppc_extshx(c,A,S,0)
+#define ppc_extshd(c,A,S) ppc_extshx(c,A,S,1)
+
+#define ppc_fabsx(c,D,B,Rc) ppc_emit32(c, (63 << 26) | (D << 21) | (0 << 16) | (B << 11) | (264 << 1) | Rc)
+#define ppc_fabs(c,D,B) ppc_fabsx(c,D,B,0)
+#define ppc_fabsd(c,D,B) ppc_fabsx(c,D,B,1)
+
+#define ppc_faddx(c,D,A,B,Rc) ppc_emit32(c, (63 << 26) | (D << 21) | (A << 16) | (B << 11) | (0 << 6) | (21 << 1) | Rc)
+#define ppc_fadd(c,D,A,B) ppc_faddx(c,D,A,B,0)
+#define ppc_faddd(c,D,A,B) ppc_faddx(c,D,A,B,1)
+
+#define ppc_faddsx(c,D,A,B,Rc) ppc_emit32(c, (59 << 26) | (D << 21) | (A << 16) | (B << 11) | (0 << 6) | (21 << 1) | Rc)
+#define ppc_fadds(c,D,A,B) ppc_faddsx(c,D,A,B,0)
+#define ppc_faddsd(c,D,A,B) ppc_faddsx(c,D,A,B,1)
+
+#define ppc_fcmpo(c,crfD,A,B) ppc_emit32(c, (63 << 26) | (crfD << 23) | (0 << 21) | (A << 16) | (B << 11) | (32 << 1) | 0)
+#define ppc_fcmpu(c,crfD,A,B) ppc_emit32(c, (63 << 26) | (crfD << 23) | (0 << 21) | (A << 16) | (B << 11) | (0 << 1) | 0)
+
+#define ppc_fctiwx(c,D,B,Rc) ppc_emit32(c, (63 << 26) | (D << 21) | (0 << 16) | (B << 11) | (14 << 1) | Rc)
+#define ppc_fctiw(c,D,B) ppc_fctiwx(c,D,B,0)
+#define ppc_fctiwd(c,D,B) ppc_fctiwx(c,D,B,1)
+
+#define ppc_fctiwzx(c,D,B,Rc) ppc_emit32(c, (63 << 26) | (D << 21) | (0 << 16) | (B << 11) | (15 << 1) | Rc)
+#define ppc_fctiwz(c,D,B) ppc_fctiwzx(c,D,B,0)
+#define ppc_fctiwzd(c,D,B) ppc_fctiwzx(c,D,B,1)
+
+#define ppc_fdivx(c,D,A,B,Rc) ppc_emit32(c, (63 << 26) | (D << 21) | (A << 16) | (B << 11) | (0 << 6) | (18 << 1) | Rc)
+#define ppc_fdiv(c,D,A,B) ppc_fdivx(c,D,A,B,0)
+#define ppc_fdivd(c,D,A,B) ppc_fdivx(c,D,A,B,1)
+
+#define ppc_fdivsx(c,D,A,B,Rc) ppc_emit32(c, (59 << 26) | (D << 21) | (A << 16) | (B << 11) | (0 << 6) | (18 << 1) | Rc)
+#define ppc_fdivs(c,D,A,B) ppc_fdivsx(c,D,A,B,0)
+#define ppc_fdivsd(c,D,A,B) ppc_fdivsx(c,D,A,B,1)
+
+#define ppc_fmaddx(c,D,A,B,C,Rc) ppc_emit32(c, (63 << 26) | (D << 21) | (A << 16) | (B << 11) | (C << 6) | (29 << 1) | Rc)
+#define ppc_fmadd(c,D,A,B,C) ppc_fmaddx(c,D,A,B,C,0)
+#define ppc_fmaddd(c,D,A,B,C) ppc_fmaddx(c,D,A,B,C,1)
+
+#define ppc_fmaddsx(c,D,A,B,C,Rc) ppc_emit32(c, (59 << 26) | (D << 21) | (A << 16) | (B << 11) | (C << 6) | (29 << 1) | Rc)
+#define ppc_fmadds(c,D,A,B,C) ppc_fmaddsx(c,D,A,B,C,0)
+#define ppc_fmaddsd(c,D,A,B,C) ppc_fmaddsx(c,D,A,B,C,1)
+
+#define ppc_fmrx(c,D,B,Rc) ppc_emit32(c, (63 << 26) | (D << 21) | (0 << 16) | (B << 11) | (72 << 1) | Rc)
+#define ppc_fmr(c,D,B) ppc_fmrx(c,D,B,0)
+#define ppc_fmrd(c,D,B) ppc_fmrx(c,D,B,1)
+
+#define ppc_fmsubx(c,D,A,C,B,Rc) ppc_emit32(c, (63 << 26) | (D << 21) | (A << 16) | (B << 11) | (C << 6) | (28 << 1) | Rc)
+#define ppc_fmsub(c,D,A,C,B) ppc_fmsubx(c,D,A,C,B,0)
+#define ppc_fmsubd(c,D,A,C,B) ppc_fmsubx(c,D,A,C,B,1)
+
+#define ppc_fmsubsx(c,D,A,C,B,Rc) ppc_emit32(c, (59 << 26) | (D << 21) | (A << 16) | (B << 11) | (C << 6) | (28 << 1) | Rc)
+#define ppc_fmsubs(c,D,A,C,B) ppc_fmsubsx(c,D,A,C,B,0)
+#define ppc_fmsubsd(c,D,A,C,B) ppc_fmsubsx(c,D,A,C,B,1)
+
+#define ppc_fmulx(c,D,A,C,Rc) ppc_emit32(c, (63 << 26) | (D << 21) | (A << 16) | (0 << 11) | (C << 6) | (25 << 1) | Rc)
+#define ppc_fmul(c,D,A,C) ppc_fmulx(c,D,A,C,0)
+#define ppc_fmuld(c,D,A,C) ppc_fmulx(c,D,A,C,1)
+
+#define ppc_fmulsx(c,D,A,C,Rc) ppc_emit32(c, (59 << 26) | (D << 21) | (A << 16) | (0 << 11) | (C << 6) | (25 << 1) | Rc)
+#define ppc_fmuls(c,D,A,C) ppc_fmulsx(c,D,A,C,0)
+#define ppc_fmulsd(c,D,A,C) ppc_fmulsx(c,D,A,C,1)
+
+#define ppc_fnabsx(c,D,B,Rc) ppc_emit32(c, (63 << 26) | (D << 21) | (0 << 16) | (B << 11) | (136 << 1) | Rc)
+#define ppc_fnabs(c,D,B) ppc_fnabsx(c,D,B,0)
+#define ppc_fnabsd(c,D,B) ppc_fnabsx(c,D,B,1)
+
+#define ppc_fnegx(c,D,B,Rc) ppc_emit32(c, (63 << 26) | (D << 21) | (0 << 16) | (B << 11) | (40 << 1) | Rc)
+#define ppc_fneg(c,D,B) ppc_fnegx(c,D,B,0)
+#define ppc_fnegd(c,D,B) ppc_fnegx(c,D,B,1)
+
+#define ppc_fnmaddx(c,D,A,C,B,Rc) ppc_emit32(c, (63 << 26) | (D << 21) | (A << 16) | (B << 11) | (C << 6) | (31 << 1) | Rc)
+#define ppc_fnmadd(c,D,A,C,B) ppc_fnmaddx(c,D,A,C,B,0)
+#define ppc_fnmaddd(c,D,A,C,B) ppc_fnmaddx(c,D,A,C,B,1)
+
+#define ppc_fnmaddsx(c,D,A,C,B,Rc) ppc_emit32(c, (59 << 26) | (D << 21) | (A << 16) | (B << 11) | (C << 6) | (31 << 1) | Rc)
+#define ppc_fnmadds(c,D,A,C,B) ppc_fnmaddsx(c,D,A,C,B,0)
+#define ppc_fnmaddsd(c,D,A,C,B) ppc_fnmaddsx(c,D,A,C,B,1)
+
+#define ppc_fnmsubx(c,D,A,C,B,Rc) ppc_emit32(c, (63 << 26) | (D << 21) | (A << 16) | (B << 11) | (C << 6) | (30 << 1) | Rc)
+#define ppc_fnmsub(c,D,A,C,B) ppc_fnmsubx(c,D,A,C,B,0)
+#define ppc_fnmsubd(c,D,A,C,B) ppc_fnmsubx(c,D,A,C,B,1)
+
+#define ppc_fnmsubsx(c,D,A,C,B,Rc) ppc_emit32(c, (59 << 26) | (D << 21) | (A << 16) | (B << 11) | (C << 6) | (30 << 1) | Rc)
+#define ppc_fnmsubs(c,D,A,C,B) ppc_fnmsubsx(c,D,A,C,B,0)
+#define ppc_fnmsubsd(c,D,A,C,B) ppc_fnmsubsx(c,D,A,C,B,1)
+
+#define ppc_fresx(c,D,B,Rc) ppc_emit32(c, (59 << 26) | (D << 21) | (0 << 16) | (B << 11) | (0 << 6) | (24 << 1) | Rc)
+#define ppc_fres(c,D,B) ppc_fresx(c,D,B,0)
+#define ppc_fresd(c,D,B) ppc_fresx(c,D,B,1)
+
+#define ppc_frspx(c,D,B,Rc) ppc_emit32(c, (63 << 26) | (D << 21) | (0 << 16) | (B << 11) | (12 << 1) | Rc)
+#define ppc_frsp(c,D,B) ppc_frspx(c,D,B,0)
+#define ppc_frspd(c,D,B) ppc_frspx(c,D,B,1)
+
+#define ppc_frsqrtex(c,D,B,Rc) ppc_emit32(c, (63 << 26) | (D << 21) | (0 << 16) | (B << 11) | (0 << 6) | (26 << 1) | Rc)
+#define ppc_frsqrte(c,D,B) ppc_frsqrtex(c,D,B,0)
+#define ppc_frsqrted(c,D,B) ppc_frsqrtex(c,D,B,1)
+
+#define ppc_fselx(c,D,A,C,B,Rc) ppc_emit32(c, (63 << 26) | (D << 21) | (A << 16) | (B << 11) | (C << 6) | (23 << 1) | Rc)
+#define ppc_fsel(c,D,A,C,B) ppc_fselx(c,D,A,C,B,0)
+#define ppc_fseld(c,D,A,C,B) ppc_fselx(c,D,A,C,B,1)
+
+#define ppc_fsqrtx(c,D,B,Rc) ppc_emit32(c, (63 << 26) | (D << 21) | (0 << 16) | (B << 11) | (0 << 6) | (22 << 1) | Rc)
+#define ppc_fsqrt(c,D,B) ppc_fsqrtx(c,D,B,0)
+#define ppc_fsqrtd(c,D,B) ppc_fsqrtx(c,D,B,1)
+
+#define ppc_fsqrtsx(c,D,B,Rc) ppc_emit32(c, (59 << 26) | (D << 21) | (0 << 16) | (B << 11) | (0 << 6) | (22 << 1) | Rc)
+#define ppc_fsqrts(c,D,B) ppc_fsqrtsx(c,D,B,0)
+#define ppc_fsqrtsd(c,D,B) ppc_fsqrtsx(c,D,B,1)
+
+#define ppc_fsubx(c,D,A,B,Rc) ppc_emit32(c, (63 << 26) | (D << 21) | (A << 16) | (B << 11) | (0 << 6) | (20 << 1) | Rc)
+#define ppc_fsub(c,D,A,B) ppc_fsubx(c,D,A,B,0)
+#define ppc_fsubd(c,D,A,B) ppc_fsubx(c,D,A,B,1)
+
+#define ppc_fsubsx(c,D,A,B,Rc) ppc_emit32(c, (59 << 26) | (D << 21) | (A << 16) | (B << 11) | (0 << 6) | (20 << 1) | Rc)
+#define ppc_fsubs(c,D,A,B) ppc_fsubsx(c,D,A,B,0)
+#define ppc_fsubsd(c,D,A,B) ppc_fsubsx(c,D,A,B,1)
+
+#define ppc_icbi(c,A,B) ppc_emit32(c, (31 << 26) | (0 << 21) | (A << 16) | (B << 11) | (982 << 1) | 0)
+
+#define ppc_isync(c) ppc_emit32(c, (19 << 26) | (0 << 11) | (150 << 1) | 0)
+
+#define ppc_lbzu(c,D,d,A) ppc_emit32(c, (35 << 26) | (D << 21) | (A << 16) | (guint16)d)
+#define ppc_lbzux(c,D,A,B) ppc_emit32(c, (31 << 26) | (D << 21) | (A << 16) | (B << 11) | (119 << 1) | 0)
+#define ppc_lbzx(c,D,A,B) ppc_emit32(c, (31 << 26) | (D << 21) | (A << 16) | (B << 11) | (87 << 1) | 0)
+
+#define ppc_lfdu(c,D,d,A) ppc_emit32(c, (51 << 26) | (D << 21) | (A << 16) | (guint16)d)
+#define ppc_lfdux(c,D,A,B) ppc_emit32(c, (31 << 26) | (D << 21) | (A << 16) | (B << 11) | (631 << 1) | 0)
+#define ppc_lfdx(c,D,A,B) ppc_emit32(c, (31 << 26) | (D << 21) | (A << 16) | (B << 11) | (599 << 1) | 0)
+
+#define ppc_lfsu(c,D,d,A) ppc_emit32(c, (49 << 26) | (D << 21) | (A << 16) | (guint16)d)
+#define ppc_lfsux(c,D,A,B) ppc_emit32(c, (31 << 26) | (D << 21) | (A << 16) | (B << 11) | (567 << 1) | 0)
+#define ppc_lfsx(c,D,A,B) ppc_emit32(c, (31 << 26) | (D << 21) | (A << 16) | (B << 11) | (535 << 1) | 0)
+
+#define ppc_lha(c,D,d,A) ppc_emit32(c, (42 << 26) | (D << 21) | (A << 16) | (guint16)d)
+#define ppc_lhau(c,D,d,A) ppc_emit32(c, (43 << 26) | (D << 21) | (A << 16) | (guint16)d)
+#define ppc_lhaux(c,D,A,B) ppc_emit32(c, (31 << 26) | (D << 21) | (A << 16) | (B << 11) | (375 << 1) | 0)
+#define ppc_lhax(c,D,A,B) ppc_emit32(c, (31 << 26) | (D << 21) | (A << 16) | (B << 11) | (343 << 1) | 0)
+#define ppc_lhbrx(c,D,A,B) ppc_emit32(c, (31 << 26) | (D << 21) | (A << 16) | (B << 11) | (790 << 1) | 0)
+#define ppc_lhzu(c,D,d,A) ppc_emit32(c, (41 << 26) | (D << 21) | (A << 16) | (guint16)d)
+
+#define ppc_lhzux(c,D,A,B) ppc_emit32(c, (31 << 26) | (D << 21) | (A << 16) | (B << 11) | (311 << 1) | 0)
+#define ppc_lhzx(c,D,A,B) ppc_emit32(c, (31 << 26) | (D << 21) | (A << 16) | (B << 11) | (279 << 1) | 0)
+
+#define ppc_lmw(c,D,d,A) ppc_emit32(c, (46 << 26) | (D << 21) | (A << 16) | (guint16)d)
+
+#define ppc_lswi(c,D,A,NB) ppc_emit32(c, (31 << 26) | (D << 21) | (A << 16) | (NB << 11) | (597 << 1) | 0)
+#define ppc_lswx(c,D,A,B) ppc_emit32(c, (31 << 26) | (D << 21) | (A << 16) | (B << 11) | (533 << 1) | 0)
+#define ppc_lwarx(c,D,A,B) ppc_emit32(c, (31 << 26) | (D << 21) | (A << 16) | (B << 11) | (20 << 1) | 0)
+#define ppc_lwbrx(c,D,A,B) ppc_emit32(c, (31 << 26) | (D << 21) | (A << 16) | (B << 11) | (534 << 1) | 0)
+
+#define ppc_lwzu(c,D,d,A) ppc_emit32(c, (33 << 26) | (D << 21) | (A << 16) | (guint16)d)
+#define ppc_lwzux(c,D,A,B) ppc_emit32(c, (31 << 26) | (D << 21) | (A << 16) | (B << 11) | (55 << 1) | 0)
+#define ppc_lwzx(c,D,A,B) ppc_emit32(c, (31 << 26) | (D << 21) | (A << 16) | (B << 11) | (23 << 1) | 0)
+
+#define ppc_mcrf(c,crfD,crfS) ppc_emit32(c, (19 << 26) | (crfD << 23) | (0 << 21) | (crfS << 18) | 0)
+#define ppc_mcrfs(c,crfD,crfS) ppc_emit32(c, (63 << 26) | (crfD << 23) | (0 << 21) | (crfS << 18) | (0 << 16) | (64 << 1) | 0)
+#define ppc_mcrxr(c,crfD) ppc_emit32(c, (31 << 26) | (crfD << 23) | (0 << 16) | (512 << 1) | 0)
+
+#define ppc_mfcr(c,D) ppc_emit32(c, (31 << 26) | (D << 21) | (0 << 16) | (19 << 1) | 0)
+#define ppc_mffsx(c,D,Rc) ppc_emit32(c, (63 << 26) | (D << 21) | (0 << 16) | (583 << 1) | Rc)
+#define ppc_mffs(c,D) ppc_mffsx(c,D,0)
+#define ppc_mffsd(c,D) ppc_mffsx(c,D,1)
+#define ppc_mfmsr(c,D) ppc_emit32(c, (31 << 26) | (D << 21) | (0 << 16) | (83 << 1) | 0)
+#define ppc_mfsr(c,D,SR) ppc_emit32(c, (31 << 26) | (D << 21) | (0 << 20) | (SR << 16) | (0 << 11) | (595 << 1) | 0)
+#define ppc_mfsrin(c,D,B) ppc_emit32(c, (31 << 26) | (D << 21) | (0 << 16) | (B << 11) | (659 << 1) | 0)
+#define ppc_mftb(c,D,TBR) ppc_emit32(c, (31 << 26) | (D << 21) | (TBR << 11) | (371 << 1) | 0)
+
+#define ppc_mtcrf(c,CRM,S) ppc_emit32(c, (31 << 26) | (S << 21) | (0 << 20) | (CRM << 12) | (0 << 11) | (144 << 1) | 0)
+
+#define ppc_mtfsb0x(c,CRB,Rc) ppc_emit32(c, (63 << 26) | (CRB << 21) | (0 << 11) | (70 << 1) | Rc)
+#define ppc_mtfsb0(c,CRB) ppc_mtfsb0x(c,CRB,0)
+#define ppc_mtfsb0d(c,CRB) ppc_mtfsb0x(c,CRB,1)
+
+#define ppc_mtfsb1x(c,CRB,Rc) ppc_emit32(c, (63 << 26) | (CRB << 21) | (0 << 11) | (38 << 1) | Rc)
+#define ppc_mtfsb1(c,CRB) ppc_mtfsb1x(c,CRB,0)
+#define ppc_mtfsb1d(c,CRB) ppc_mtfsb1x(c,CRB,1)
+
+#define ppc_mtfsfx(c,FM,B,Rc) ppc_emit32(c, (63 << 26) | (0 << 25) | (FM << 22) | (0 << 21) | (B << 11) | (711 << 1) | Rc)
+#define ppc_mtfsf(c,FM,B) ppc_mtfsfx(c,FM,B,0)
+#define ppc_mtfsfd(c,FM,B) ppc_mtfsfx(c,FM,B,1)
+
+#define ppc_mtfsfix(c,crfD,IMM,Rc) ppc_emit32(c, (63 << 26) | (crfD << 23) | (0 << 16) | (IMM << 12) | (0 << 11) | (134 << 1) | Rc)
+#define ppc_mtfsfi(c,crfD,IMM) ppc_mtfsfix(c,crfD,IMM,0)
+#define ppc_mtfsfid(c,crfD,IMM) ppc_mtfsfix(c,crfD,IMM,1)
+
+#define ppc_mtmsr(c, S) ppc_emit32(c, (31 << 26) | (S << 21) | (0 << 11) | (146 << 1) | 0)
+
+#define ppc_mtsr(c,SR,S) ppc_emit32(c, (31 << 26) | (S << 21) | (0 << 20) | (SR << 16) | (0 << 11) | (210 << 1) | 0)
+#define ppc_mtsrin(c,S,B) ppc_emit32(c, (31 << 26) | (S << 21) | (0 << 16) | (B << 11) | (242 << 1) | 0)
+
+#define ppc_mulhwx(c,D,A,B,Rc) ppc_emit32(c, (31 << 26) | (D << 21) | (A << 16) | (B << 11) | (0 << 10) | (75 << 1) | Rc)
+#define ppc_mulhw(c,D,A,B) ppc_mulhwx(c,D,A,B,0)
+#define ppc_mulhwd(c,D,A,B) ppc_mulhwx(c,D,A,B,1)
+
+#define ppc_mulhwux(c,D,A,B,Rc) ppc_emit32(c, (31 << 26) | (D << 21) | (A << 16) | (B << 11) | (0 << 10) | (11 << 1) | Rc)
+#define ppc_mulhwu(c,D,A,B) ppc_mulhwux(c,D,A,B,0)
+#define ppc_mulhwud(c,D,A,B) ppc_mulhwux(c,D,A,B,1)
+
+#define ppc_mulli(c,D,A,SIMM) ppc_emit32(c, (7 << 26) | (D << 21) | (A << 16) | (guint16)(SIMM))
+
+#define ppc_mullwx(c,D,A,B,OE,Rc) ppc_emit32(c, (31 << 26) | (D << 21) | (A << 16) | (B << 11) | (OE << 10) | (235 << 1) | Rc)
+#define ppc_mullw(c,D,A,B) ppc_mullwx(c,D,A,B,0,0)
+#define ppc_mullwd(c,D,A,B) ppc_mullwx(c,D,A,B,0,1)
+#define ppc_mullwo(c,D,A,B) ppc_mullwx(c,D,A,B,1,0)
+#define ppc_mullwod(c,D,A,B) ppc_mullwx(c,D,A,B,1,1)
+
+#define ppc_nandx(c,A,S,B,Rc) ppc_emit32(c, (31 << 26) | (S << 21) | (A << 16) | (B << 11) | (476 << 1) | Rc)
+#define ppc_nand(c,A,S,B) ppc_nandx(c,A,S,B,0)
+#define ppc_nandd(c,A,S,B) ppc_nandx(c,A,S,B,1)
+
+#define ppc_negx(c,D,A,OE,Rc) ppc_emit32(c, (31 << 26) | (D << 21) | (A << 16) | (0 << 11) | (OE << 10) | (104 << 1) | Rc)
+#define ppc_neg(c,D,A) ppc_negx(c,D,A,0,0)
+#define ppc_negd(c,D,A) ppc_negx(c,D,A,0,1)
+#define ppc_nego(c,D,A) ppc_negx(c,D,A,1,0)
+#define ppc_negod(c,D,A) ppc_negx(c,D,A,1,1)
+
+#define ppc_norx(c,A,S,B,Rc) ppc_emit32(c, (31 << 26) | (S << 21) | (A << 16) | (B << 11) | (124 << 1) | Rc)
+#define ppc_nor(c,A,S,B) ppc_norx(c,A,S,B,0)
+#define ppc_nord(c,A,S,B) ppc_norx(c,A,S,B,1)
+
+#define ppc_not(c,A,S) ppc_norx(c,A,S,S,0)
+
+#define ppc_orx(c,A,S,B,Rc) ppc_emit32(c, (31 << 26) | (S << 21) | (A << 16) | (B << 11) | (444 << 1) | Rc)
+#define ppc_ord(c,A,S,B) ppc_orx(c,A,S,B,1)
+
+#define ppc_orcx(c,A,S,B,Rc) ppc_emit32(c, (31 << 26) | (S << 21) | (A << 16) | (B << 11) | (412 << 1) | Rc)
+#define ppc_orc(c,A,S,B) ppc_orcx(c,A,S,B,0)
+#define ppc_orcd(c,A,S,B) ppc_orcx(c,A,S,B,1)
+
+#define ppc_oris(c,A,S,UIMM) ppc_emit32(c, (25 << 26) | (S << 21) | (A << 16) | (guint16)(UIMM))
+
+#define ppc_rfi(c) ppc_emit32(c, (19 << 26) | (0 << 11) | (50 << 1) | 0)
+
+#define ppc_rlwimix(c,A,S,SH,MB,ME,Rc) ppc_emit32(c, (20 << 26) | (S << 21) | (A << 16) | (SH << 11) | (MB << 6) | (ME << 1) | Rc)
+#define ppc_rlwimi(c,A,S,SH,MB,ME) ppc_rlwimix(c,A,S,SH,MB,ME,0)
+#define ppc_rlwimid(c,A,S,SH,MB,ME) ppc_rlwimix(c,A,S,SH,MB,ME,1)
+
+#define ppc_rlwinmx(c,A,S,SH,MB,ME,Rc) ppc_emit32(c, (21 << 26) | ((S) << 21) | ((A) << 16) | ((SH) << 11) | ((MB) << 6) | ((ME) << 1) | (Rc))
+#define ppc_rlwinm(c,A,S,SH,MB,ME) ppc_rlwinmx(c,A,S,SH,MB,ME,0)
+#define ppc_rlwinmd(c,A,S,SH,MB,ME) ppc_rlwinmx(c,A,S,SH,MB,ME,1)
+#define ppc_extlwi(c,A,S,n,b) ppc_rlwinm(c,A,S, b, 0, (n) - 1)
+#define ppc_extrwi(c,A,S,n,b) ppc_rlwinm(c,A,S, (b) + (n), 32 - (n), 31)
+#define ppc_rotlwi(c,A,S,n) ppc_rlwinm(c,A,S, n, 0, 31)
+#define ppc_rotrwi(c,A,S,n) ppc_rlwinm(c,A,S, 32 - (n), 0, 31)
+#define ppc_slwi(c,A,S,n) ppc_rlwinm(c,A,S, n, 0, 31 - (n))
+#define ppc_srwi(c,A,S,n) ppc_rlwinm(c,A,S, 32 - (n), n, 31)
+#define ppc_clrlwi(c,A,S,n) ppc_rlwinm(c,A,S, 0, n, 31)
+#define ppc_clrrwi(c,A,S,n) ppc_rlwinm(c,A,S, 0, 0, 31 - (n))
+#define ppc_clrlslwi(c,A,S,b,n) ppc_rlwinm(c,A,S, n, (b) - (n), 31 - (n))
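+
+/* Example (illustrative): the mnemonics above are the usual rlwinm idioms,
+ * e.g. ppc_slwi (code, ppc_r3, ppc_r4, 8) is rlwinm r3, r4, 8, 0, 23 --
+ * rotate left 8 and clear the low 8 bits, i.e. a shift left by 8.
+ */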
+
+#define ppc_rlwnmx(c,A,S,SH,MB,ME,Rc) ppc_emit32(c, (23 << 26) | (S << 21) | (A << 16) | (SH << 11) | (MB << 6) | (ME << 1) | Rc)
+#define ppc_rlwnm(c,A,S,SH,MB,ME) ppc_rlwnmx(c,A,S,SH,MB,ME,0)
+#define ppc_rlwnmd(c,A,S,SH,MB,ME) ppc_rlwnmx(c,A,S,SH,MB,ME,1)
+
+#define ppc_sc(c) ppc_emit32(c, (17 << 26) | (0 << 2) | (1 << 1) | 0)
+
+#define ppc_slwx(c,S,A,B,Rc) ppc_emit32(c, (31 << 26) | (S << 21) | (A << 16) | (B << 11) | (24 << 1) | Rc)
+#define ppc_slw(c,S,A,B) ppc_slwx(c,S,A,B,0)
+#define ppc_slwd(c,S,A,B) ppc_slwx(c,S,A,B,1)
+
+#define ppc_srawx(c,A,S,B,Rc) ppc_emit32(c, (31 << 26) | (S << 21) | (A << 16) | (B << 11) | (792 << 1) | Rc)
+#define ppc_sraw(c,A,S,B) ppc_srawx(c,A,S,B,0)
+#define ppc_srawd(c,A,S,B) ppc_srawx(c,A,S,B,1)
+
+#define ppc_srawix(c,A,S,SH,Rc) ppc_emit32(c, (31 << 26) | (S << 21) | (A << 16) | (SH << 11) | (824 << 1) | Rc)
+#define ppc_srawi(c,A,S,B) ppc_srawix(c,A,S,B,0)
+#define ppc_srawid(c,A,S,B) ppc_srawix(c,A,S,B,1)
+
+#define ppc_srwx(c,A,S,SH,Rc) ppc_emit32(c, (31 << 26) | (S << 21) | (A << 16) | (SH << 11) | (536 << 1) | Rc)
+#define ppc_srw(c,A,S,B) ppc_srwx(c,A,S,B,0)
+#define ppc_srwd(c,A,S,B) ppc_srwx(c,A,S,B,1)
+
+#define ppc_stbu(c,S,d,A) ppc_emit32(c, (39 << 26) | (S << 21) | (A << 16) | (guint16)(d))
+
+#define ppc_stbux(c,S,A,B) ppc_emit32(c, (31 << 26) | (S << 21) | (A << 16) | (B << 11) | (247 << 1) | 0)
+#define ppc_stbx(c,S,A,B) ppc_emit32(c, (31 << 26) | (S << 21) | (A << 16) | (B << 11) | (215 << 1) | 0)
+
+#define ppc_stfdu(c,S,d,A) ppc_emit32(c, (55 << 26) | (S << 21) | (A << 16) | (guint16)(d))
+
+#define ppc_stfdx(c,S,A,B) ppc_emit32(c, (31 << 26) | (S << 21) | (A << 16) | (B << 11) | (727 << 1) | 0)
+#define ppc_stfiwx(c,S,A,B) ppc_emit32(c, (31 << 26) | (S << 21) | (A << 16) | (B << 11) | (983 << 1) | 0)
+
+#define ppc_stfsu(c,S,d,A) ppc_emit32(c, (53 << 26) | (S << 21) | (A << 16) | (guint16)(d))
+#define ppc_stfsux(c,S,A,B) ppc_emit32(c, (31 << 26) | (S << 21) | (A << 16) | (B << 11) | (695 << 1) | 0)
+#define ppc_stfsx(c,S,A,B) ppc_emit32(c, (31 << 26) | (S << 21) | (A << 16) | (B << 11) | (663 << 1) | 0)
+#define ppc_sthbrx(c,S,A,B) ppc_emit32(c, (31 << 26) | (S << 21) | (A << 16) | (B << 11) | (918 << 1) | 0)
+#define ppc_sthu(c,S,d,A) ppc_emit32(c, (45 << 26) | (S << 21) | (A << 16) | (guint16)(d))
+#define ppc_sthux(c,S,A,B) ppc_emit32(c, (31 << 26) | (S << 21) | (A << 16) | (B << 11) | (439 << 1) | 0)
+#define ppc_sthx(c,S,A,B) ppc_emit32(c, (31 << 26) | (S << 21) | (A << 16) | (B << 11) | (407 << 1) | 0)
+#define ppc_stmw(c,S,d,A) ppc_emit32(c, (47 << 26) | (S << 21) | (A << 16) | (guint16)d)
+#define ppc_stswi(c,S,A,NB) ppc_emit32(c, (31 << 26) | (S << 21) | (A << 16) | (NB << 11) | (725 << 1) | 0)
+#define ppc_stswx(c,S,A,NB) ppc_emit32(c, (31 << 26) | (S << 21) | (A << 16) | (NB << 11) | (661 << 1) | 0)
+#define ppc_stwbrx(c,S,A,B) ppc_emit32(c, (31 << 26) | (S << 21) | (A << 16) | (B << 11) | (662 << 1) | 0)
+#define ppc_stwcxd(c,S,A,B) ppc_emit32(c, (31 << 26) | (S << 21) | (A << 16) | (B << 11) | (150 << 1) | 1)
+#define ppc_stwux(c,S,A,B) ppc_emit32(c, (31 << 26) | (S << 21) | (A << 16) | (B << 11) | (183 << 1) | 0)
+#define ppc_stwx(c,S,A,B) ppc_emit32(c, (31 << 26) | (S << 21) | (A << 16) | (B << 11) | (151 << 1) | 0)
+
+#define ppc_subfx(c,D,A,B,OE,Rc) ppc_emit32(c, (31 << 26) | (D << 21) | (A << 16) | (B << 11) | (OE << 10) | (40 << 1) | Rc)
+#define ppc_subf(c,D,A,B) ppc_subfx(c,D,A,B,0,0)
+#define ppc_subfd(c,D,A,B) ppc_subfx(c,D,A,B,0,1)
+#define ppc_subfo(c,D,A,B) ppc_subfx(c,D,A,B,1,0)
+#define ppc_subfod(c,D,A,B) ppc_subfx(c,D,A,B,1,1)
+
+#define ppc_sub(c,D,A,B) ppc_subf(c,D,B,A)
+
+#define ppc_subfcx(c,D,A,B,OE,Rc) ppc_emit32(c, (31 << 26) | (D << 21) | (A << 16) | (B << 11) | (OE << 10) | (8 << 1) | Rc)
+#define ppc_subfc(c,D,A,B) ppc_subfcx(c,D,A,B,0,0)
+#define ppc_subfcd(c,D,A,B) ppc_subfcx(c,D,A,B,0,1)
+#define ppc_subfco(c,D,A,B) ppc_subfcx(c,D,A,B,1,0)
+#define ppc_subfcod(c,D,A,B) ppc_subfcx(c,D,A,B,1,1)
+
+#define ppc_subfex(c,D,A,B,OE,Rc) ppc_emit32(c, (31 << 26) | (D << 21) | (A << 16) | (B << 11) | (OE << 10) | (136 << 1) | Rc)
+#define ppc_subfe(c,D,A,B) ppc_subfex(c,D,A,B,0,0)
+#define ppc_subfed(c,D,A,B) ppc_subfex(c,D,A,B,0,1)
+#define ppc_subfeo(c,D,A,B) ppc_subfex(c,D,A,B,1,0)
+#define ppc_subfeod(c,D,A,B) ppc_subfex(c,D,A,B,1,1)
+
+#define ppc_subfic(c,D,A,SIMM) ppc_emit32(c, (8 << 26) | (D << 21) | (A << 16) | (guint16)(SIMM))
+
+#define ppc_subfmex(c,D,A,OE,Rc) ppc_emit32(c, (31 << 26) | (D << 21) | (A << 16) | (0 << 11) | (OE << 10) | (232 << 1) | Rc)
+#define ppc_subfme(c,D,A) ppc_subfmex(c,D,A,0,0)
+#define ppc_subfmed(c,D,A) ppc_subfmex(c,D,A,0,1)
+#define ppc_subfmeo(c,D,A) ppc_subfmex(c,D,A,1,0)
+#define ppc_subfmeod(c,D,A) ppc_subfmex(c,D,A,1,1)
+
+#define ppc_subfzex(c,D,A,OE,Rc) ppc_emit32(c, (31 << 26) | (D << 21) | (A << 16) | (0 << 11) | (OE << 10) | (200 << 1) | Rc)
+#define ppc_subfze(c,D,A) ppc_subfzex(c,D,A,0,0)
+#define ppc_subfzed(c,D,A) ppc_subfzex(c,D,A,0,1)
+#define ppc_subfzeo(c,D,A) ppc_subfzex(c,D,A,1,0)
+#define ppc_subfzeod(c,D,A) ppc_subfzex(c,D,A,1,1)
+
+#define ppc_sync(c) ppc_emit32(c, (31 << 26) | (0 << 11) | (598 << 1) | 0)
+#define ppc_tlbia(c) ppc_emit32(c, (31 << 26) | (0 << 11) | (370 << 1) | 0)
+#define ppc_tlbie(c,B) ppc_emit32(c, (31 << 26) | (0 << 16) | (B << 11) | (306 << 1) | 0)
+#define ppc_tlbsync(c) ppc_emit32(c, (31 << 26) | (0 << 11) | (566 << 1) | 0)
+
+#define ppc_tw(c,TO,A,B) ppc_emit32(c, (31 << 26) | (TO << 21) | (A << 16) | (B << 11) | (4 << 1) | 0)
+#define ppc_twi(c,TO,A,SIMM) ppc_emit32(c, (3 << 26) | (TO << 21) | (A << 16) | (guint16)(SIMM))
+
+#define ppc_xorx(c,A,S,B,RC) ppc_emit32(c, (31 << 26) | (S << 21) | (A << 16) | (B << 11) | (316 << 1) | RC)
+#define ppc_xor(c,A,S,B) ppc_xorx(c,A,S,B,0)
+#define ppc_xord(c,A,S,B) ppc_xorx(c,A,S,B,1)
+
+#define ppc_xori(c,S,A,UIMM) ppc_emit32(c, (26 << 26) | (S << 21) | (A << 16) | (guint16)(UIMM))
+#define ppc_xoris(c,S,A,UIMM) ppc_emit32(c, (27 << 26) | (S << 21) | (A << 16) | (guint16)(UIMM))
+
+/* this marks the end of my work, ct */
+
+/* PPC64 */
+
+/* The following FP instructions are not available to 32-bit
+   implementations prior to PowerISA V2.01, but are available to
+   32-bit mode programs on 64-bit PowerPC implementations and all
+   processors compliant with PowerISA 2.01 or later. */
+
+#define ppc_fcfidx(c,D,B,Rc) ppc_emit32(c, (63 << 26) | ((D) << 21) | (0 << 16) | ((B) << 11) | (846 << 1) | (Rc))
+#define ppc_fcfid(c,D,B) ppc_fcfidx(c,D,B,0)
+#define ppc_fcfidd(c,D,B) ppc_fcfidx(c,D,B,1)
+
+#define ppc_fctidx(c,D,B,Rc) ppc_emit32(c, (63 << 26) | ((D) << 21) | (0 << 16) | ((B) << 11) | (814 << 1) | (Rc))
+#define ppc_fctid(c,D,B) ppc_fctidx(c,D,B,0)
+#define ppc_fctidd(c,D,B) ppc_fctidx(c,D,B,1)
+
+#define ppc_fctidzx(c,D,B,Rc) ppc_emit32(c, (63 << 26) | ((D) << 21) | (0 << 16) | ((B) << 11) | (815 << 1) | (Rc))
+#define ppc_fctidz(c,D,B) ppc_fctidzx(c,D,B,0)
+#define ppc_fctidzd(c,D,B) ppc_fctidzx(c,D,B,1)
+
+#ifdef __mono_ppc64__
+
+#define ppc_load_sequence(c,D,v) G_STMT_START { \
+ ppc_lis ((c), (D), ((guint64)(v) >> 48) & 0xffff); \
+ ppc_ori ((c), (D), (D), ((guint64)(v) >> 32) & 0xffff); \
+ ppc_sldi ((c), (D), (D), 32); \
+ ppc_oris ((c), (D), (D), ((guint64)(v) >> 16) & 0xffff); \
+ ppc_ori ((c), (D), (D), (guint64)(v) & 0xffff); \
+ } G_STMT_END
+
+#define PPC_LOAD_SEQUENCE_LENGTH 20
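+
+/* Worked example (illustrative): for v = 0x1122334455667788 the sequence is
+ *   lis  rD, 0x1122
+ *   ori  rD, rD, 0x3344
+ *   sldi rD, rD, 32
+ *   oris rD, rD, 0x5566
+ *   ori  rD, rD, 0x7788
+ * -- five 4-byte instructions, hence PPC_LOAD_SEQUENCE_LENGTH == 20.
+ */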
+
+#define ppc_is_imm32(val) (((((gint64)val)>> 31) == 0) || ((((gint64)val)>> 31) == -1))
+#define ppc_is_imm48(val) (((((gint64)val)>> 47) == 0) || ((((gint64)val)>> 47) == -1))
+
+#define ppc_load48(c,D,v) G_STMT_START { \
+ ppc_li ((c), (D), ((gint64)(v) >> 32) & 0xffff); \
+ ppc_sldi ((c), (D), (D), 32); \
+ ppc_oris ((c), (D), (D), ((guint64)(v) >> 16) & 0xffff); \
+ ppc_ori ((c), (D), (D), (guint64)(v) & 0xffff); \
+ } G_STMT_END
+
+#define ppc_load(c,D,v) G_STMT_START { \
+ if (ppc_is_imm16 ((guint64)(v))) { \
+ ppc_li ((c), (D), (guint16)(guint64)(v)); \
+ } else if (ppc_is_imm32 ((guint64)(v))) { \
+ ppc_load32 ((c), (D), (guint32)(guint64)(v)); \
+ } else if (ppc_is_imm48 ((guint64)(v))) { \
+ ppc_load48 ((c), (D), (guint64)(v)); \
+ } else { \
+ ppc_load_sequence ((c), (D), (guint64)(v)); \
+ } \
+ } G_STMT_END
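+
+/* Note (my reading of the macro above): ppc_load tiers the immediate by
+ * width -- 1 instruction for a 16-bit value, 2 (lis/ori) for 32 bits,
+ * 4 (li/sldi/oris/ori) for 48 bits, and the full 5-instruction
+ * ppc_load_sequence otherwise.
+ */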
+
+#define ppc_load_func(c,D,v) G_STMT_START { \
+ ppc_load_sequence ((c), ppc_r11, (guint64)(gsize)(v)); \
+ ppc_ldptr ((c), ppc_r2, sizeof (gpointer), ppc_r11); \
+ ppc_ldptr ((c), (D), 0, ppc_r11); \
+ } G_STMT_END
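+
+/* Note (an assumption on my part, based on the shape of the macro above):
+ * this follows the ELFv1 function-descriptor convention, where a function
+ * pointer addresses a descriptor {entry, TOC, environment}; the entry
+ * point lands in D and the callee's TOC pointer in r2.
+ */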
+
+#define ppc_load_multiple_regs(c,D,d,A) G_STMT_START { \
+ int __i, __o = (d); \
+ for (__i = (D); __i <= 31; ++__i) { \
+ ppc_ldr ((c), __i, __o, (A)); \
+ __o += sizeof (guint64); \
+ } \
+ } G_STMT_END
+
+#define ppc_store_multiple_regs(c,S,d,A) G_STMT_START { \
+ int __i, __o = (d); \
+ for (__i = (S); __i <= 31; ++__i) { \
+ ppc_str ((c), __i, __o, (A)); \
+ __o += sizeof (guint64); \
+ } \
+ } G_STMT_END
+
+#define ppc_compare(c,cfrD,A,B) ppc_cmp((c), (cfrD), 1, (A), (B))
+#define ppc_compare_reg_imm(c,cfrD,A,B) ppc_cmpi((c), (cfrD), 1, (A), (B))
+#define ppc_compare_log(c,cfrD,A,B) ppc_cmpl((c), (cfrD), 1, (A), (B))
+
+#define ppc_shift_left(c,A,S,B) ppc_sld((c), (A), (S), (B))
+#define ppc_shift_left_imm(c,A,S,n) ppc_sldi((c), (A), (S), (n))
+
+#define ppc_shift_right_imm(c,A,S,B) ppc_srdi((c), (A), (S), (B))
+#define ppc_shift_right_arith_imm(c,A,S,B) ppc_sradi((c), (A), (S), (B))
+
+#define ppc_multiply(c,D,A,B) ppc_mulld((c), (D), (A), (B))
+
+#define ppc_clear_right_imm(c,A,S,n) ppc_clrrdi((c), (A), (S), (n))
+
+#define ppc_divdx(c,D,A,B,OE,Rc) ppc_emit32(c, (31 << 26) | ((D) << 21) | ((A) << 16) | ((B) << 11) | ((OE) << 10) | (489 << 1) | (Rc))
+#define ppc_divd(c,D,A,B) ppc_divdx(c,D,A,B,0,0)
+#define ppc_divdd(c,D,A,B) ppc_divdx(c,D,A,B,0,1)
+#define ppc_divdo(c,D,A,B) ppc_divdx(c,D,A,B,1,0)
+#define ppc_divdod(c,D,A,B) ppc_divdx(c,D,A,B,1,1)
+
+#define ppc_divdux(c,D,A,B,OE,Rc) ppc_emit32(c, (31 << 26) | ((D) << 21) | ((A) << 16) | ((B) << 11) | ((OE) << 10) | (457 << 1) | (Rc))
+#define ppc_divdu(c,D,A,B) ppc_divdux(c,D,A,B,0,0)
+#define ppc_divdud(c,D,A,B) ppc_divdux(c,D,A,B,0,1)
+#define ppc_divduo(c,D,A,B) ppc_divdux(c,D,A,B,1,0)
+#define ppc_divduod(c,D,A,B) ppc_divdux(c,D,A,B,1,1)
+
+#define ppc_extswx(c,S,A,Rc) ppc_emit32(c, (31 << 26) | ((S) << 21) | ((A) << 16) | (0 << 11) | (986 << 1) | (Rc))
+#define ppc_extsw(c,A,S) ppc_extswx(c,S,A,0)
+#define ppc_extswd(c,A,S) ppc_extswx(c,S,A,1)
+
+/* These move float to/from GPR instructions are only available on POWER6
+   in native mode. These instructions are faster than the equivalent
+   store/load sequences because they avoid the store queue and its
+   associated delays. These instructions should only be used in 64-bit
+   mode unless the kernel preserves the 64-bit GPRs across signals and
+   dispatch in 32-bit mode. The Linux kernel does not. */
+#define ppc_mftgpr(c,T,B) ppc_emit32(c, (31 << 26) | ((T) << 21) | (0 << 16) | ((B) << 11) | (735 << 1) | 0)
+#define ppc_mffgpr(c,T,B) ppc_emit32(c, (31 << 26) | ((T) << 21) | (0 << 16) | ((B) << 11) | (607 << 1) | 0)
+
+#define ppc_ld(c,D,ds,A) ppc_emit32(c, (58 << 26) | ((D) << 21) | ((A) << 16) | ((guint32)(ds) & 0xfffc) | 0)
+#define ppc_lwa(c,D,ds,A) ppc_emit32(c, (58 << 26) | ((D) << 21) | ((A) << 16) | ((ds) & 0xfffc) | 2)
+#define ppc_ldarx(c,D,A,B) ppc_emit32(c, (31 << 26) | ((D) << 21) | ((A) << 16) | ((B) << 11) | (84 << 1) | 0)
+#define ppc_ldu(c,D,ds,A) ppc_emit32(c, (58 << 26) | ((D) << 21) | ((A) << 16) | ((guint32)(ds) & 0xfffc) | 1)
+#define ppc_ldux(c,D,A,B) ppc_emit32(c, (31 << 26) | ((D) << 21) | ((A) << 16) | ((B) << 11) | (53 << 1) | 0)
+#define ppc_lwaux(c,D,A,B) ppc_emit32(c, (31 << 26) | ((D) << 21) | ((A) << 16) | ((B) << 11) | (373 << 1) | 0)
+#define ppc_ldx(c,D,A,B) ppc_emit32(c, (31 << 26) | ((D) << 21) | ((A) << 16) | ((B) << 11) | (21 << 1) | 0)
+#define ppc_lwax(c,D,A,B) ppc_emit32(c, (31 << 26) | ((D) << 21) | ((A) << 16) | ((B) << 11) | (341 << 1) | 0)
+
+#define ppc_mulhdx(c,D,A,B,Rc) ppc_emit32(c, (31 << 26) | ((D) << 21) | ((A) << 16) | ((B) << 11) | (0 << 10) | (73 << 1) | (Rc))
+#define ppc_mulhd(c,D,A,B) ppc_mulhdx(c,D,A,B,0)
+#define ppc_mulhdd(c,D,A,B) ppc_mulhdx(c,D,A,B,1)
+#define ppc_mulhdux(c,D,A,B,Rc) ppc_emit32(c, (31 << 26) | ((D) << 21) | ((A) << 16) | ((B) << 11) | (0 << 10) | (9 << 1) | (Rc))
+#define ppc_mulhdu(c,D,A,B) ppc_mulhdux(c,D,A,B,0)
+#define ppc_mulhdud(c,D,A,B) ppc_mulhdux(c,D,A,B,1)
+
+#define ppc_mulldx(c,D,A,B,OE,Rc) ppc_emit32(c, (31 << 26) | ((D) << 21) | ((A) << 16) | ((B) << 11) | ((OE) << 10) | (233 << 1) | (Rc))
+#define ppc_mulld(c,D,A,B) ppc_mulldx(c,D,A,B,0,0)
+#define ppc_mulldd(c,D,A,B) ppc_mulldx(c,D,A,B,0,1)
+#define ppc_mulldo(c,D,A,B) ppc_mulldx(c,D,A,B,1,0)
+#define ppc_mulldod(c,D,A,B) ppc_mulldx(c,D,A,B,1,1)
+
+#define ppc_rldclx(c,A,S,B,MB,Rc) ppc_emit32(c, (30 << 26) | ((S) << 21) | ((A) << 16) | ((B) << 11) | (ppc_split_5_1(MB) << 5) | (8 << 1) | (Rc))
+#define ppc_rldcl(c,A,S,B,MB) ppc_rldclx(c,A,S,B,MB,0)
+#define ppc_rldcld(c,A,S,B,MB) ppc_rldclx(c,A,S,B,MB,1)
+#define ppc_rotld(c,A,S,B) ppc_rldcl(c, A, S, B, 0)
+
+#define ppc_rldcrx(c,A,S,B,ME,Rc) ppc_emit32(c, (30 << 26) | ((S) << 21) | ((A) << 16) | ((B) << 11) | (ppc_split_5_1(ME) << 5) | (9 << 1) | (Rc))
+#define ppc_rldcr(c,A,S,B,ME) ppc_rldcrx(c,A,S,B,ME,0)
+#define ppc_rldcrd(c,A,S,B,ME) ppc_rldcrx(c,A,S,B,ME,1)
+
+#define ppc_rldicx(c,S,A,SH,MB,Rc) ppc_emit32(c, (30 << 26) | ((S) << 21) | ((A) << 16) | (ppc_split_5_1_5(SH) << 11) | (ppc_split_5_1(MB) << 5) | (2 << 2) | (ppc_split_5_1_1(SH) << 1) | (Rc))
+#define ppc_rldic(c,A,S,SH,MB) ppc_rldicx(c,S,A,SH,MB,0)
+#define ppc_rldicd(c,A,S,SH,MB) ppc_rldicx(c,S,A,SH,MB,1)
+
+#define ppc_rldiclx(c,S,A,SH,MB,Rc) ppc_emit32(c, (30 << 26) | ((S) << 21) | ((A) << 16) | (ppc_split_5_1_5(SH) << 11) | (ppc_split_5_1(MB) << 5) | (0 << 2) | (ppc_split_5_1_1(SH) << 1) | (Rc))
+#define ppc_rldicl(c,A,S,SH,MB) ppc_rldiclx(c,S,A,SH,MB,0)
+#define ppc_rldicld(c,A,S,SH,MB) ppc_rldiclx(c,S,A,SH,MB,1)
+#define ppc_extrdi(c,A,S,n,b) ppc_rldicl(c,A,S, (b) + (n), 64 - (n))
+#define ppc_rotldi(c,A,S,n) ppc_rldicl(c,A,S, n, 0)
+#define ppc_rotrdi(c,A,S,n) ppc_rldicl(c,A,S, 64 - (n), 0)
+#define ppc_srdi(c,A,S,n) ppc_rldicl(c,A,S, 64 - (n), n)
+#define ppc_clrldi(c,A,S,n) ppc_rldicl(c,A,S, 0, n)
+
+#define ppc_rldicrx(c,A,S,SH,ME,Rc) ppc_emit32(c, (30 << 26) | ((S) << 21) | ((A) << 16) | (ppc_split_5_1_5(SH) << 11) | (ppc_split_5_1(ME) << 5) | (1 << 2) | (ppc_split_5_1_1(SH) << 1) | (Rc))
+#define ppc_rldicr(c,A,S,SH,ME) ppc_rldicrx(c,A,S,SH,ME,0)
+#define ppc_rldicrd(c,A,S,SH,ME) ppc_rldicrx(c,A,S,SH,ME,1)
+#define ppc_extldi(c,A,S,n,b) ppc_rldicr(c, A, S, b, (n) - 1)
+#define ppc_sldi(c,A,S,n) ppc_rldicr(c, A, S, n, 63 - (n))
+#define ppc_clrrdi(c,A,S,n) ppc_rldicr(c, A, S, 0, 63 - (n))
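+
+/* Example (illustrative): as in the 32-bit case, these are rldicr idioms,
+ * e.g. ppc_sldi (code, ppc_r3, ppc_r4, 8) is rldicr r3, r4, 8, 55 --
+ * rotate left 8 and clear the low 8 bits, i.e. a 64-bit shift left by 8.
+ */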
+
+#define ppc_rldimix(c,S,A,SH,MB,Rc) ppc_emit32(c, (30 << 26) | ((S) << 21) | ((A) << 16) | (ppc_split_5_1_5(SH) << 11) | (ppc_split_5_1(MB) << 5) | (3 << 2) | (ppc_split_5_1_1(SH) << 1) | (Rc))
+#define ppc_rldimi(c,A,S,SH,MB) ppc_rldimix(c,S,A,SH,MB,0)
+#define ppc_rldimid(c,A,S,SH,MB) ppc_rldimix(c,S,A,SH,MB,1)
+
+#define ppc_slbia(c) ppc_emit32(c, (31 << 26) | (0 << 21) | (0 << 16) | (0 << 11) | (498 << 1) | 0)
+#define ppc_slbie(c,B) ppc_emit32(c, (31 << 26) | (0 << 21) | (0 << 16) | ((B) << 11) | (434 << 1) | 0)
+#define ppc_sldx(c,S,A,B,Rc) ppc_emit32(c, (31 << 26) | ((S) << 21) | ((A) << 16) | ((B) << 11) | (27 << 1) | (Rc))
+#define ppc_sld(c,A,S,B) ppc_sldx(c,S,A,B,0)
+#define ppc_sldd(c,A,S,B) ppc_sldx(c,S,A,B,1)
+
+#define ppc_sradx(c,S,A,B,Rc) ppc_emit32(c, (31 << 26) | ((S) << 21) | ((A) << 16) | ((B) << 11) | (794 << 1) | (Rc))
+#define ppc_srad(c,A,S,B) ppc_sradx(c,S,A,B,0)
+#define ppc_sradd(c,A,S,B) ppc_sradx(c,S,A,B,1)
+#define ppc_sradix(c,S,A,SH,Rc) ppc_emit32(c, (31 << 26) | ((S) << 21) | ((A) << 16) | (((SH) & 31) << 11) | (413 << 2) | (((SH) >> 5) << 1) | (Rc))
+#define ppc_sradi(c,A,S,SH) ppc_sradix(c,S,A,SH,0)
+#define ppc_sradid(c,A,S,SH) ppc_sradix(c,S,A,SH,1)
+
+#define ppc_srdx(c,S,A,B,Rc) ppc_emit32(c, (31 << 26) | ((S) << 21) | ((A) << 16) | ((B) << 11) | (539 << 1) | (Rc))
+#define ppc_srd(c,A,S,B) ppc_srdx(c,S,A,B,0)
+#define ppc_srdd(c,A,S,B) ppc_srdx(c,S,A,B,1)
+
+#define ppc_std(c,S,ds,A) ppc_emit32(c, (62 << 26) | ((S) << 21) | ((A) << 16) | ((guint32)(ds) & 0xfffc) | 0)
+#define ppc_stdcxd(c,S,A,B) ppc_emit32(c, (31 << 26) | ((S) << 21) | ((A) << 16) | ((B) << 11) | (214 << 1) | 1)
+#define ppc_stdu(c,S,ds,A) ppc_emit32(c, (62 << 26) | ((S) << 21) | ((A) << 16) | ((guint32)(ds) & 0xfffc) | 1)
+#define ppc_stdux(c,S,A,B) ppc_emit32(c, (31 << 26) | ((S) << 21) | ((A) << 16) | ((B) << 11) | (181 << 1) | 0)
+#define ppc_stdx(c,S,A,B) ppc_emit32(c, (31 << 26) | ((S) << 21) | ((A) << 16) | ((B) << 11) | (149 << 1) | 0)
+
+#else
+/* Always true for 32-bit */
+#define ppc_is_imm32(val) (1)
+#endif
+
+#endif
diff --git a/lib/ffts/src/arch/s390x/.gitignore b/lib/ffts/src/arch/s390x/.gitignore
new file mode 100644
index 0000000..341daec
--- /dev/null
+++ b/lib/ffts/src/arch/s390x/.gitignore
@@ -0,0 +1,6 @@
+/Makefile
+/Makefile.in
+/.libs
+/.deps
+/*.la
+/*.lo
diff --git a/lib/ffts/src/arch/s390x/ChangeLog b/lib/ffts/src/arch/s390x/ChangeLog
new file mode 100644
index 0000000..e756d35
--- /dev/null
+++ b/lib/ffts/src/arch/s390x/ChangeLog
@@ -0,0 +1,35 @@
+2010-03-23 Neale Ferguson <neale@sinenomine.net>
+
+ * s390x-codegen.h: Remove duplicate
+
+2009-06-24 Neale Ferguson <neale@sinenomine.net>
+
+ * s390x-codegen.h: Add some new instructions.
+
+2007-04-12 Neale Ferguson <neale@sinenomine.net>
+
+ * tramp.c: Add MONO_TYPE_PTR case.
+
+2007-01-23 Neale Ferguson <neale@sinenomine.net>
+
+ * s390x-codegen.h: Add packed attribute to several instruction structures.
+
+2006-03-13 Neale Ferguson <neale@sinenomine.net>
+
+ * s390x-codegen.h: Fix immediate checks.
+
+2006-01-06 Neale Ferguson <neale@sinenomine.net>
+
+ * s390x-codegen.h: Add lpdbr instruction (OP_ABS).
+
+2006-01-03 Neale Ferguson <neale@sinenomine.net>
+
+ * s390x-codegen.h: Add some new instructions.
+
+2004-12-15 Neale Ferguson <Neale.Ferguson@SoftwareAG-usa.com>
+
+ * s390x-codegen.h: Add some new instructions (CS, CSG, CSY, CDS, CDSG, CDSY)
+
+2004-08-03 Neale Ferguson <Neale.Ferguson@SoftwareAG-usa.com>
+
+ * s390x-codegen.h Makefile.am tramp.c: S/390 64-bit interpreter
diff --git a/lib/ffts/src/arch/s390x/Makefile.am b/lib/ffts/src/arch/s390x/Makefile.am
new file mode 100644
index 0000000..ce7f470
--- /dev/null
+++ b/lib/ffts/src/arch/s390x/Makefile.am
@@ -0,0 +1,7 @@
+
+AM_CPPFLAGS = $(GLIB_CFLAGS) -I$(top_srcdir)
+
+noinst_LTLIBRARIES = libmonoarch-s390x.la
+
+libmonoarch_s390x_la_SOURCES = tramp.c s390x-codegen.h
+
diff --git a/lib/ffts/src/arch/s390x/s390x-codegen.h b/lib/ffts/src/arch/s390x/s390x-codegen.h
new file mode 100644
index 0000000..47e6564
--- /dev/null
+++ b/lib/ffts/src/arch/s390x/s390x-codegen.h
@@ -0,0 +1,997 @@
+/*
+ Copyright (C) 2001 Radek Doulik
+*/
+
+#ifndef S390X_H
+#define S390X_H
+#include <glib.h>
+#include <assert.h>
+#include <limits.h>
+
+#define FLOAT_REGS 2 /* No. float registers for parms */
+#define GENERAL_REGS 5 /* No. general registers for parms */
+
+#define ARG_BASE s390_r10 /* Register for addressing arguments*/
+#define STKARG \
+ (i*(sizeof(stackval))) /* Displacement of ith argument */
+
+#define MINV_POS 160 /* MonoInvocation stack offset */
+#define STACK_POS (MINV_POS - sizeof (stackval) * sig->param_count)
+#define OBJ_POS 8
+#define TYPE_OFFSET (G_STRUCT_OFFSET (stackval, type))
+
+#define MIN_CACHE_LINE 256
+
+/*------------------------------------------------------------------*/
+/* Sequence to add an int/long long to parameters to stack_from_data*/
+/*------------------------------------------------------------------*/
+#define ADD_ISTACK_PARM(r, i) \
+ if (reg_param < GENERAL_REGS-(r)) { \
+ s390_lay (p, s390_r4, 0, STK_BASE, \
+ local_start + (reg_param - this_flag) * sizeof(long)); \
+ reg_param += (i); \
+ } else { \
+ s390_lay (p, s390_r4, 0, STK_BASE, \
+ sz.stack_size + MINV_POS + stack_param * sizeof(long)); \
+ stack_param += (i); \
+ }
+
+/*------------------------------------------------------------------*/
+/* Sequence to add a float/double to parameters to stack_from_data */
+/*------------------------------------------------------------------*/
+#define ADD_RSTACK_PARM(i) \
+ if (fpr_param < FLOAT_REGS) { \
+ s390_lay (p, s390_r4, 0, STK_BASE, \
+ float_pos + (fpr_param * sizeof(float) * (i))); \
+ fpr_param++; \
+ } else { \
+ stack_param += (stack_param % (i)); \
+ s390_lay (p, s390_r4, 0, STK_BASE, \
+ sz.stack_size + MINV_POS + stack_param * sizeof(float) * (i)); \
+ stack_param += (i); \
+ }
+
+/*------------------------------------------------------------------*/
+/* Sequence to add a structure ptr to parameters to stack_from_data */
+/*------------------------------------------------------------------*/
+#define ADD_TSTACK_PARM \
+ if (reg_param < GENERAL_REGS) { \
+ s390_ly (p, s390_r4, 0, STK_BASE, \
+ local_start + (reg_param - this_flag) * sizeof(long)); \
+ reg_param++; \
+ } else { \
+ s390_ly (p, s390_r4, 0, STK_BASE, \
+ sz.stack_size + MINV_POS + stack_param * sizeof(long)); \
+ stack_param++; \
+ }
+
+#define ADD_PSTACK_PARM(r, i) \
+ if (reg_param < GENERAL_REGS-(r)) { \
+ s390_lay (p, s390_r4, 0, STK_BASE, \
+ local_start + (reg_param - this_flag) * sizeof(long)); \
+ reg_param += (i); \
+ } else { \
+ s390_ly (p, s390_r4, 0, STK_BASE, \
+ sz.stack_size + MINV_POS + stack_param * sizeof(long)); \
+ stack_param++; \
+ }
+
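+/*------------------------------------------------------------------*/
+/* Illustrative expansion (a sketch): these macros rely on ambient  */
+/* emitter variables (p, reg_param, stack_param, local_start,       */
+/* this_flag, sz).  While a general register is still free,         */
+/* ADD_ISTACK_PARM(0, 1) reduces to:                                */
+/*                                                                  */
+/*   s390_lay (p, s390_r4, 0, STK_BASE,                             */
+/*             local_start + (reg_param - this_flag)*sizeof(long)); */
+/*   reg_param += 1;                                                */
+/*                                                                  */
+/* i.e. r4 is pointed at the saved register copy of the argument.   */
+/*------------------------------------------------------------------*/
+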
+typedef enum {
+ s390_r0 = 0,
+ s390_r1,
+ s390_r2,
+ s390_r3,
+ s390_r4,
+ s390_r5,
+ s390_r6,
+ s390_r7,
+ s390_r8,
+ s390_r9,
+ s390_r10,
+ s390_r11,
+ s390_r12,
+ s390_r13,
+ s390_r14,
+ s390_r15,
+} S390IntRegister;
+
+typedef enum {
+ s390_f0 = 0,
+ s390_f1,
+ s390_f2,
+ s390_f3,
+ s390_f4,
+ s390_f5,
+ s390_f6,
+ s390_f7,
+ s390_f8,
+ s390_f9,
+ s390_f10,
+ s390_f11,
+ s390_f12,
+ s390_f13,
+ s390_f14,
+ s390_f15,
+} S390FloatRegister;
+
+typedef enum {
+ s390_a0 = 0,
+ s390_a1,
+ s390_a2,
+ s390_a3,
+ s390_a4,
+ s390_a5,
+ s390_a6,
+ s390_a7,
+ s390_a8,
+ s390_a9,
+ s390_a10,
+ s390_a11,
+ s390_a12,
+ s390_a13,
+ s390_a14,
+ s390_a15,
+} S390AccRegister;
+
+typedef enum {
+ s390_fpc = 256,
+} S390SpecialRegister;
+
+#define s390_is_imm16(val) ((glong)val >= (glong) SHRT_MIN && \
+ (glong)val <= (glong) SHRT_MAX)
+#define s390_is_imm32(val) ((glong)val >= (glong) INT_MIN && \
+ (glong)val <= (glong) INT_MAX)
+#define s390_is_uimm16(val) ((glong)val >= 0 && (glong)val <= (glong) USHRT_MAX)
+#define s390_is_uimm32(val) ((glong)val >= 0 && (glong)val <= (glong) UINT_MAX)
+#define s390_is_uimm20(val) ((glong)val >= 0 && (glong)val <= 1048575)
+#define s390_is_imm20(val) ((glong)val >= -524288 && (glong)val <= 524287)
+#define s390_is_imm12(val) ((glong)val >= (glong)-4096 && \
+ (glong)val <= (glong)4095)
+#define s390_is_uimm12(val) ((glong)val >= 0 && (glong)val <= 4095)
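+
+/* Illustrative use of the range checks above (a sketch, not part of
+   the original source): an emitter picks the narrowest encoding that
+   holds a constant and falls back to wider forms, e.g.
+
+       if (s390_is_imm16 (val))
+               s390_lghi (code, s390_r2, val);
+       else if (s390_is_imm32 (val))
+               s390_lgfi (code, s390_r2, val);
+       else {
+               s390_iihf (code, s390_r2, ((guint64) val) >> 32);
+               s390_iilf (code, s390_r2, val & 0xffffffff);
+       }
+
+   using the lghi/lgfi/iihf/iilf macros defined later in this file. */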
+
+#define STK_BASE s390_r15
+#define S390_SP s390_r15
+#define S390_FP s390_r11
+#define S390_MINIMAL_STACK_SIZE 160
+#define S390_REG_SAVE_OFFSET 48
+#define S390_PARM_SAVE_OFFSET 16
+#define S390_RET_ADDR_OFFSET 112
+#define S390_FLOAT_SAVE_OFFSET 128
+
+#define S390_CC_ZR	8
+#define S390_CC_EQ	8
+#define S390_CC_NE 7
+#define S390_CC_NZ 7
+#define S390_CC_LT 4
+#define S390_CC_GT 2
+#define S390_CC_GE 11
+#define S390_CC_NM 11
+#define S390_CC_LE 13
+#define S390_CC_OV 1
+#define S390_CC_NO 14
+#define S390_CC_CY 3
+#define S390_CC_NC 12
+#define S390_CC_UN 15
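+
+/* The S390_CC_* values are 4-bit branch masks for brc/brcl, one bit
+   per condition-code value 0..3; they can be OR-ed together.  For
+   example (a sketch), branch when the result is zero or overflowed:
+
+       s390_brc (code, S390_CC_ZR | S390_CC_OV, displacement);
+
+   which is exactly what the s390_jeo convenience macro below emits. */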
+
+#define s390_word(addr, value) do \
+{ \
+ * (guint32 *) addr = (guint32) value; \
+ addr += sizeof(guint32); \
+} while (0)
+
+#define s390_float(addr, value) do \
+{ \
+ * (gfloat *) addr = (gfloat) value; \
+ addr += sizeof(gfloat); \
+} while (0)
+
+#define s390_llong(addr, value) do \
+{ \
+ * (guint64 *) addr = (guint64) value; \
+ addr += sizeof(guint64); \
+} while (0)
+
+#define s390_double(addr, value) do \
+{ \
+ * (gdouble *) addr = (gdouble) value; \
+ addr += sizeof(gdouble); \
+} while (0)
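+
+/* These helpers write raw literals into the instruction stream.  A
+   common pattern (used by tramp.c in this directory) is to branch
+   over an inline constant and pick up its address in a register:
+
+       s390_bras  (p, s390_r13, 4);               branch over the literal
+       s390_llong (p, size);                      inline 64-bit constant
+       s390_lg    (p, s390_r1, 0, s390_r13, 0);   load it back
+ */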
+
+typedef struct {
+ short op;
+} E_Format;
+
+typedef struct {
+ char op;
+ int im;
+} I_Format;
+
+typedef struct {
+ char op;
+ char r1 : 4;
+ char r2 : 4;
+} RR_Format;
+
+typedef struct {
+ short op;
+ char xx;
+ char r1 : 4;
+ char r2 : 4;
+} RRE_Format;
+
+typedef struct {
+ short op;
+ char r1 : 4;
+ char xx : 4;
+ char r3 : 4;
+ char r2 : 4;
+} RRF_Format_1;
+
+typedef struct {
+ short op;
+ char m3 : 4;
+ char xx : 4;
+ char r1 : 4;
+ char r2 : 4;
+} RRF_Format_2;
+
+typedef struct {
+ short op;
+ char r3 : 4;
+ char m4 : 4;
+ char r1 : 4;
+ char r2 : 4;
+} RRF_Format_3;
+
+typedef struct {
+ char op;
+ char r1 : 4;
+ char x2 : 4;
+ char b2 : 4;
+ short d2 : 12;
+} RX_Format;
+
+typedef struct {
+ char op1;
+ char r1 : 4;
+ char x2 : 4;
+ char b2 : 4;
+ int d2 : 12;
+ char xx;
+ char op2;
+} RXE_Format;
+
+typedef struct {
+ char op1;
+ char r3 : 4;
+ char x2 : 4;
+ char b2 : 4;
+ int d2 : 12;
+ char r1 : 4;
+ char xx : 4;
+ char op2;
+} RXF_Format;
+
+typedef struct {
+ char op1;
+ char r1 : 4;
+ char x2 : 4;
+ char b2 : 4;
+ int d2 : 20;
+ char op2;
+} __attribute__ ((packed)) RXY_Format;
+
+typedef struct {
+ char op;
+ char r1 : 4;
+ char r3 : 4;
+ char b2 : 4;
+ int d2 : 12;
+} RS_Format_1;
+
+typedef struct {
+ char op;
+ char r1 : 4;
+ char m3 : 4;
+ char b2 : 4;
+ int d2 : 12;
+} RS_Format_2;
+
+typedef struct {
+ char op;
+ char r1 : 4;
+ char xx : 4;
+ char b2 : 4;
+ int d2 : 12;
+} RS_Format_3;
+
+typedef struct {
+ char op1;
+ char r1 : 4;
+ char r3 : 4;
+ char b2 : 4;
+ int d2 : 20;
+ char op2;
+} __attribute__ ((packed)) RSY_Format_1;
+
+typedef struct {
+ char op1;
+ char r1 : 4;
+ char m3 : 4;
+ char b2 : 4;
+ int d2 : 20;
+ char op2;
+} __attribute__ ((packed)) RSY_Format_2;
+
+typedef struct {
+ char op1;
+ char l1 : 4;
+ char xx : 4;
+ char b1 : 4;
+ int d1 : 12;
+ char yy;
+ char op2;
+} RSL_Format;
+
+typedef struct {
+ char op;
+ char r1 : 4;
+ char r3 : 4;
+ short i2;
+} RSI_Format;
+
+typedef struct {
+ char op1;
+ char m1 : 4;
+ char op2 : 4;
+ short i2;
+} RI_Format;
+
+typedef struct {
+ char op1;
+ char r1 : 4;
+ char r3 : 4;
+ short i2;
+ char xx;
+ char op2;
+} RIE_Format_1;
+
+typedef struct {
+ char op1;
+ char r1 : 4;
+ char r3 : 4;
+ short i2;
+ char m2 : 4;
+ char xx : 4;
+ char op2;
+} RIE_Format_2;
+
+typedef struct {
+ char op1;
+ char r1 : 4;
+ char r3 : 4;
+ short d;
+ char i;
+ char op2;
+} RIE_Format_3;
+
+typedef struct {
+ char op1;
+ char r1 : 4;
+ char yy : 4;
+ short i2;
+ char m3 : 4;
+ char xx : 4;
+ char op2;
+} RIE_Format_4;
+
+typedef struct {
+ char op1;
+ char r1 : 4;
+ char op2 : 4;
+ int i2;
+} __attribute__ ((packed)) RIL_Format_1;
+
+typedef struct {
+ char op1;
+ char m1 : 4;
+ char op2 : 4;
+ int i2;
+} __attribute__ ((packed)) RIL_Format_2;
+
+typedef struct {
+ char op;
+ char i2;
+ char b1 : 4;
+ short d1 : 12;
+} SI_Format;
+
+typedef struct {
+ char op1;
+ char i2;
+ char b1 : 4;
+ int d1 : 20;
+ char op2;
+} __attribute__ ((packed)) SIY_Format;
+
+typedef struct {
+ short op;
+ char b2 : 4;
+ short d2 : 12;
+} S_Format;
+
+typedef struct {
+ char op;
+ char ll;
+ char b1 : 4;
+ short d1 : 12;
+ char b2 : 4;
+ short d2 : 12;
+} SS_Format_1;
+
+typedef struct {
+ char op;
+ char l1 : 4;
+ char l2 : 4;
+ char b1 : 4;
+ short d1 : 12;
+ char b2 : 4;
+ short d2 : 12;
+} SS_Format_2;
+
+typedef struct {
+ char op;
+ char r1 : 4;
+ char r3 : 4;
+ char b1 : 4;
+ short d1 : 12;
+ char b2 : 4;
+ short d2 : 12;
+} SS_Format_3;
+
+typedef struct {
+ char op;
+ char r1 : 4;
+ char r3 : 4;
+ char b2 : 4;
+ short d2 : 12;
+ char b4 : 4;
+ short d4 : 12;
+} SS_Format_4;
+
+typedef struct {
+ short op;
+ short tb1 : 4;
+ short d1 : 12;
+ short b2 : 4;
+ short d2 : 12;
+} __attribute__ ((packed)) SSE_Format;
+
+typedef struct {
+ short op;
+ char r3 : 4;
+ char o2 : 4;
+ short b1 : 4;
+ short d1 : 12;
+ short b2 : 4;
+ short d2 : 12;
+} __attribute__ ((packed)) SSF_Format;
+
+#define s390_emit16(c, x) do \
+{ \
+ *((guint16 *) c) = (guint16) x; \
+ c += sizeof(guint16); \
+} while(0)
+
+#define s390_emit32(c, x) do \
+{ \
+ *((guint32 *) c) = (guint32) x; \
+ c += sizeof(guint32); \
+} while(0)
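+
+/* Worked example (illustrative): every instruction macro below is
+   built from these two emitters.  s390_lr (p, s390_r2, s390_r3)
+   goes through S390_RR (p, 0x18, 2, 3) and emits the halfword
+
+       0x18 << 8 | 2 << 4 | 3  ==  0x1823
+
+   i.e. the two-byte RR instruction "lr %r2,%r3", advancing p by 2. */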
+
+#define S390_E(c,opc) s390_emit16(c,opc)
+
+#define S390_I(c,opc,imm) s390_emit16(c, (opc << 8 | imm))
+
+#define S390_RR(c,opc,g1,g2) s390_emit16(c, (opc << 8 | (g1) << 4 | g2))
+
+#define S390_RRE(c,opc,g1,g2) s390_emit32(c, (opc << 16 | (g1) << 4 | g2))
+
+#define S390_RRF_1(c,opc,g1,g2,g3) s390_emit32(c, (opc << 16 | (g1) << 12 | (g3) << 4 | g2))
+
+#define S390_RRF_2(c,opc,g1,k3,g2) s390_emit32(c, (opc << 16 | (k3) << 12 | (g1) << 4 | g2))
+
+#define S390_RRF_3(c,opc,g1,g2,k4,g3) s390_emit32(c, (opc << 16 | (g3) << 12 | (k4) << 8 | (g1) << 4 | g2))
+
+#define S390_RX(c,opc,g1,n2,s2,p2) s390_emit32(c, (opc << 24 | (g1) << 20 | (n2) << 16 | (s2) << 12 | ((p2) & 0xfff)))
+
+#define S390_RXE(c,opc,g1,n2,s2,p2) do \
+{ \
+ s390_emit16(c, ((opc & 0xff00) | (g1) << 4 | n2)); \
+ s390_emit32(c, ((s2) << 28 | (((p2) & 0xfff) << 16) | \
+ (opc & 0xff))); \
+} while (0)
+
+#define S390_RXY(c,opc,g1,n2,s2,p2) do \
+{ \
+ s390_emit16(c, ((opc & 0xff00) | (g1) << 4 | n2)); \
+ s390_emit32(c, ((s2) << 28 | (((p2) & 0xfff) << 16) | \
+ ((((p2) & 0xff000) >> 12) << 8) | \
+ (opc & 0xff))); \
+} while (0)
+
+#define S390_RS_1(c,opc,g1,g3,s2,p2) s390_emit32(c, (opc << 24 | (g1) << 20 | (g3) << 16 | (s2) << 12 | ((p2) & 0xfff)))
+
+#define S390_RS_2(c,opc,g1,k3,s2,p2) s390_emit32(c, (opc << 24 | (g1) << 20 | (k3) << 16 | (s2) << 12 | ((p2) & 0xfff)))
+
+#define S390_RS_3(c,opc,g1,s2,p2) s390_emit32(c, (opc << 24 | (g1) << 20 | (s2) << 12 | ((p2) & 0xfff)))
+
+#define S390_RSY_1(c,opc,g1,g3,s2,p2) do \
+{ \
+ s390_emit16(c, ((opc & 0xff00) | (g1) << 4 | g3)); \
+ s390_emit32(c, ((s2) << 28 | (((p2) & 0xfff) << 16) | \
+ ((((p2) & 0xff000) >> 12) << 8) | \
+ (opc & 0xff))); \
+} while (0)
+
+#define S390_RSY_2(c,opc,g1,k3,s2,p2) do \
+{ \
+ s390_emit16(c, ((opc & 0xff00) | (g1) << 4 | k3)); \
+ s390_emit32(c, ((s2) << 28 | (((p2) & 0xfff) << 16) | \
+ ((((p2) & 0xff000) >> 12) << 8) | \
+ (opc & 0xff))); \
+} while (0)
+
+#define S390_RSL(c,opc,ln,s1,p1) do \
+{ \
+ s390_emit16(c, ((opc & 0xff00) | (ln) << 4)); \
+	s390_emit32(c, ((s1) << 28 | (((p1) & 0xfff) << 16) | \
+ (opc & 0xff))); \
+} while (0)
+
+#define S390_RSI(c,opc,g1,g3,m2) s390_emit32(c, (opc << 24 | (g1) << 20 | (g3) << 16 | (m2 & 0xffff)))
+
+#define S390_RI(c,opc,g1,m2) s390_emit32(c, ((opc >> 4) << 24 | (g1) << 20 | (opc & 0x0f) << 16 | (m2 & 0xffff)))
+
+#define S390_RIE_1(c,opc,g1,g3,m2) do \
+{ \
+ s390_emit16(c, ((opc & 0xff00) | (g1) << 4 | g3)); \
+ s390_emit32(c, ((m2) << 16 | (opc & 0xff))); \
+} while (0)
+
+#define S390_RIE_2(c,opc,g1,g2,m3,v) do \
+{ \
+	s390_emit16(c, ((opc & 0xff00) | (g1) << 4 | g2)); \
+	s390_emit16(c, (v)); \
+	s390_emit16(c, ((m3) << 12 | (opc & 0xff))); \
+} while (0)
+
+#define S390_RIE_3(c,opc,g1,i,m3,d) do \
+{ \
+ s390_emit16(c, ((opc & 0xff00) | (g1) << 4 | m3)); \
+ s390_emit16(c, (d)); \
+ s390_emit16(c, ((i) << 8 | (opc & 0xff))); \
+} while (0)
+
+#define S390_RIE_4(c,opc,g1,i2,m3) do \
+{ \
+	s390_emit16(c, ((opc & 0xff00) | (g1) << 4)); \
+ s390_emit16(c, (i2)); \
+ s390_emit16(c, ((m3) << 12 | (opc & 0xff))); \
+} while (0)
+
+#define S390_RIL_1(c,opc,g1,m2) do \
+{ \
+ s390_emit16(c, ((opc >> 4) << 8 | (g1) << 4 | (opc & 0xf))); \
+ s390_emit32(c, m2); \
+} while (0)
+
+#define S390_RIL_2(c,opc,k1,m2) do \
+{ \
+ s390_emit16(c, ((opc >> 4) << 8 | (k1) << 4 | (opc & 0xf))); \
+ s390_emit32(c, m2); \
+} while (0)
+
+#define S390_RIS(c,opc,r,i,m3,b,d) do \
+{ \
+	s390_emit16(c, (((opc) & 0xff00) | (r) << 4 | (m3))); \
+	s390_emit16(c, ((b) << 12) | (d)); \
+	s390_emit16(c, ((i) << 8) | ((opc) & 0xff)); \
+} while (0)
+
+#define S390_RRS(c,opc,r1,r2,m3,b,d) do \
+{ \
+	s390_emit16(c, (((opc) & 0xff00) | (r1) << 4 | (r2))); \
+	s390_emit16(c, ((b) << 12) | (d)); \
+	s390_emit16(c, ((m3) << 12) | ((opc) & 0xff)); \
+} while (0)
+
+#define S390_SI(c,opc,s1,p1,m2) s390_emit32(c, (opc << 24 | (m2) << 16 | (s1) << 12 | ((p1) & 0xfff)))
+
+#define S390_SIY(c,opc,s1,p1,m2) do \
+{ \
+	s390_emit16(c, ((opc & 0xff00) | m2)); \
+	s390_emit32(c, ((s1) << 28 | (((p1) & 0xfff) << 16) | \
+			((((p1) & 0xff000) >> 12) << 8) | \
+			(opc & 0xff))); \
+} while (0)
+
+#define S390_S(c,opc,s2,p2) s390_emit32(c, (opc << 16 | (s2) << 12 | ((p2) & 0xfff)))
+
+#define S390_SS_1(c,opc,ln,s1,p1,s2,p2) do \
+{ \
+ s390_emit32(c, (opc << 24 | ((ln-1) & 0xff) << 16 | \
+ (s1) << 12 | ((p1) & 0xfff))); \
+ s390_emit16(c, ((s2) << 12 | ((p2) & 0xfff))); \
+} while (0)
+
+#define S390_SS_2(c,opc,n1,n2,s1,p1,s2,p2) do \
+{ \
+	s390_emit32(c, (opc << 24 | (n1) << 20 | (n2) << 16 | \
+			(s1) << 12 | ((p1) & 0xfff))); \
+ s390_emit16(c, ((s2) << 12 | ((p2) & 0xfff))); \
+} while (0)
+
+#define S390_SS_3(c,opc,g1,g3,s1,p1,s2,p2) do \
+{ \
+	s390_emit32(c, (opc << 24 | (g1) << 20 | (g3) << 16 | \
+			(s1) << 12 | ((p1) & 0xfff))); \
+ s390_emit16(c, ((s2) << 12 | ((p2) & 0xfff))); \
+} while (0)
+
+#define S390_SS_4(c,opc,g1,g3,s2,p2,s4,p4) do \
+{ \
+	s390_emit32(c, (opc << 24 | (g1) << 20 | (g3) << 16 | \
+			(s2) << 12 | ((p2) & 0xfff))); \
+ s390_emit16(c, ((s4) << 12 | ((p4) & 0xfff))); \
+} while (0)
+
+#define S390_SSE(c,opc,s1,p1,s2,p2) do \
+{ \
+ s390_emit16(c, opc); \
+ s390_emit16(c, ((s1) << 12 | ((p1) & 0xfff))); \
+ s390_emit16(c, ((s2) << 12 | ((p2) & 0xfff))); \
+} while (0)
+
+#define S390_SSF(c,opc,r3,s1,p1,s2,p2) do \
+{ \
+	s390_emit16(c, (((opc) >> 4) << 8) | ((r3) << 4) | \
+ ((opc) & 0xf)); \
+ s390_emit16(c, ((s1) << 12 | ((p1) & 0xfff))); \
+ s390_emit16(c, ((s2) << 12 | ((p2) & 0xfff))); \
+} while (0)
+
+#define s390_a(c, r, x, b, d) S390_RX(c, 0x5a, r, x, b, d)
+#define s390_adb(c, r, x, b, d) S390_RXE(c, 0xed1a, r, x, b, d)
+#define s390_adbr(c, r1, r2) S390_RRE(c, 0xb31a, r1, r2)
+#define s390_aebr(c, r1, r2) S390_RRE(c, 0xb30a, r1, r2)
+#define s390_afi(c, r, v) S390_RIL_1(c, 0xc29, r, v)
+#define s390_ag(c, r, x, b, d) S390_RXY(c, 0xe308, r, x, b, d)
+#define s390_agf(c, r, x, b, d) S390_RXY(c, 0xe318, r, x, b, d)
+#define s390_agfi(c, r, v) S390_RIL_1(c, 0xc28, r, v)
+#define s390_agfr(c, r1, r2) S390_RRE(c, 0xb918, r1, r2)
+#define s390_aghi(c, r, v) S390_RI(c, 0xa7b, r, v)
+#define s390_aghik(c, r1, r3, v) S390_RIE_1(c, 0xecd9, r1, r3, v)
+#define s390_agr(c, r1, r2) S390_RRE(c, 0xb908, r1, r2)
+#define s390_agrk(c, r1, r2, r3) S390_RRF_1(c, 0xb9e8, r1, r2, r3)
+#define s390_agsi(c, b, d, v) S390_SIY(c, 0xeb7a, b, d, v)
+#define s390_ahhhr(c, r1, r2, r3) S390_RRF_1(c, 0xb9c8, r1, r2, r3)
+#define s390_ahhlr(c, r1, r2, r3) S390_RRF_1(c, 0xb9d8, r1, r2, r3)
+#define s390_ahi(c, r, v) S390_RI(c, 0xa7a, r, v)
+#define s390_ahik(c, r1, r3, v) S390_RIE_1(c, 0xecd8, r1, r3, v)
+#define s390_ahy(c, r, x, b, d) S390_RXY(c, 0xe37a, r, x, b, d)
+#define s390_aih(c, r, v) S390_RIL_1(c, 0xcc8, r, v)
+#define s390_al(c, r, x, b, d) S390_RX(c, 0x5e, r, x, b, d)
+#define s390_alc(c, r, x, b, d) S390_RXY(c, 0xe398, r, x, b, d)
+#define s390_alcg(c, r, x, b, d) S390_RXY(c, 0xe388, r, x, b, d)
+#define s390_alcgr(c, r1, r2) S390_RRE(c, 0xb988, r1, r2)
+#define s390_alcr(c, r1, r2) S390_RRE(c, 0xb998, r1, r2)
+#define s390_alfi(c, r, v) S390_RIL_1(c, 0xc2b, r, v)
+#define s390_alg(c, r, x, b, d) S390_RXY(c, 0xe30a, r, x, b, d)
+#define s390_algf(c, r, x, b, d) S390_RXY(c, 0xe31a, r, x, b, d)
+#define s390_algfi(c, r, v) S390_RIL_1(c, 0xc2a, r, v)
+#define s390_algfr(c, r1, r2) S390_RRE(c, 0xb91a, r1, r2)
+#define s390_alghsik(c, r1, r3, v) S390_RIE_1(c, 0xecdb, r1, r3, v)
+#define s390_algr(c, r1, r2) S390_RRE(c, 0xb90a, r1, r2)
+#define s390_algsi(c, b, d, v) S390_SIY(c, 0xeb7e, b, d, v)
+#define s390_alhhhr(c, r1, r2, r3) S390_RRF_1(c, 0xb9ca, r1, r2, r3)
+#define s390_alhhlr(c, r1, r2, r3) S390_RRF_1(c, 0xb9da, r1, r2, r3)
+#define s390_alhsik(c, r1, r3, v) S390_RIE_1(c, 0xecda, r1, r3, v)
+#define s390_alr(c, r1, r2) S390_RR(c, 0x1e, r1, r2)
+#define s390_alrk(c, r1, r2, r3) S390_RRF_1(c, 0xb9fa, r1, r2, r3)
+#define s390_alsi(c, b, d, v) S390_SIY(c, 0xeb6e, b, d, v)
+#define s390_alsih(c, r, v) S390_RIL_1(c, 0xcca, r, v)
+#define s390_alsihn(c, r, v) S390_RIL_1(c, 0xccb, r, v)
+#define s390_aly(c, r, x, b, d) S390_RXY(c, 0xe35e, r, x, b, d)
+#define s390_ar(c, r1, r2) S390_RR(c, 0x1a, r1, r2)
+#define s390_ark(c, r1, r2, r3) S390_RRF_1(c, 0xb9f8, r1, r2, r3)
+#define s390_asi(c, b, d, v) S390_SIY(c, 0xeb6a, b, d, v)
+#define s390_ay(c, r, x, b, d) S390_RXY(c, 0xe35a, r, x, b, d)
+#define s390_basr(c, r1, r2) S390_RR(c, 0x0d, r1, r2)
+#define s390_bctr(c, r1, r2) S390_RR(c, 0x06, r1, r2)
+#define s390_bctrg(c, r1, r2) S390_RRE(c, 0xb946, r1, r2)
+#define s390_bnzr(c, r) S390_RR(c, 0x07, 0x07, r)
+#define s390_bras(c, r, o) S390_RI(c, 0xa75, r, o)
+#define s390_brasl(c, r, o) S390_RIL_1(c, 0xc05, r, o)
+#define s390_brc(c, m, d) S390_RI(c, 0xa74, m, d)
+#define s390_brcl(c, m, d) S390_RIL_2(c, 0xc04, m, d)
+#define s390_br(c, r) S390_RR(c, 0x07, 0xf, r)
+#define s390_break(c) S390_RR(c, 0, 0, 0)
+#define s390_bzr(c, r) S390_RR(c, 0x07, 0x08, r)
+#define s390_c(c, r, x, b, d) S390_RX(c, 0x59, r, x, b, d)
+#define s390_cdb(c, r, x, b, d) S390_RXE(c, 0xed19, r, x, b, d)
+#define s390_cdbr(c, r1, r2) S390_RRE(c, 0xb319, r1, r2)
+#define s390_cdfbr(c, r1, r2) S390_RRE(c, 0xb395, r1, r2)
+#define s390_cdgbr(c, r1, r2) S390_RRE(c, 0xb3a5, r1, r2)
+#define s390_cds(c, r1, r2, b, d) S390_RX(c, 0xbb, r1, r2, b, d)
+#define s390_cdsg(c, r1, r2, b, d) S390_RSY_1(c, 0xeb3e, r1, r2, b, d)
+#define s390_cdsy(c, r1, r2, b, d) S390_RSY_1(c, 0xeb31, r1, r2, b, d)
+#define s390_cebr(c, r1, r2) S390_RRE(c, 0xb309, r1, r2)
+#define s390_cegbr(c, r1, r2) S390_RRE(c, 0xb3a4, r1, r2)
+#define s390_cfdbr(c, r1, m, r2) S390_RRF_2(c, 0xb399, r1, m, r2)
+#define s390_cfi(c, r, v) S390_RIL_1(c, 0xc2d, r, v)
+#define s390_cgdbr(c, r1, m, r2) S390_RRF_2(c, 0xb3a9, r1, m, r2)
+#define s390_cg(c, r, x, b, d) S390_RXY(c, 0xe320, r, x, b, d)
+#define s390_cgfi(c, r, v) S390_RIL_1(c, 0xc2c, r, v)
+#define s390_cgfrl(c, r, v) S390_RIL_1(c, 0xc6c, r, v)
+#define s390_cghi(c, r, i) S390_RI(c, 0xa7f, r, i)
+#define s390_cgib(c, r, i, m, b, d) S390_RIS(c, 0xecfc, r, i, m, b, d)
+#define s390_cgij(c, r, i, m, d) S390_RIE_3(c, 0xec7c, r, i, m, d)
+#define s390_cgit(c, r, i, m) S390_RIE_4(c, 0xec70, r, i, m)
+#define s390_cgr(c, r1, r2) S390_RRE(c, 0xb920, r1, r2)
+#define s390_cgrb(c, r1, r2, m3, b, d) S390_RRS(c, 0xece4, r1, r2, m3, b, d)
+#define s390_cgrj(c, r1, r2, m3, v) S390_RIE_2(c, 0xec64, r1, r2, m3, v)
+#define s390_cgrl(c, r, v) S390_RIL_1(c, 0xc68, r, v)
+#define s390_chi(c, r, i) S390_RI(c, 0xa7e, r, i)
+#define s390_cib(c, r, i, m, b, d) S390_RIS(c, 0xecfe, r, i, m, b, d)
+#define s390_cij(c, r, i, m, d) S390_RIE_3(c, 0xec7e, r, i, m, d)
+#define s390_cit(c, r, i, m) S390_RIE_4(c, 0xec72, r, i, m)
+#define s390_cl(c, r, x, b, d) S390_RX(c, 0x55, r, x, b, d)
+#define s390_clg(c, r, x, b, d) S390_RXY(c, 0xe321, r, x, b, d)
+#define s390_clgib(c, r, i, m, b, d) S390_RIS(c, 0xecfd, r, i, m, b, d)
+#define s390_clgij(c, r, i, m, d) S390_RIE_3(c, 0xec7d, r, i, m, d)
+#define s390_clgr(c, r1, r2) S390_RRE(c, 0xb921, r1, r2)
+#define s390_clgrj(c, r1, r2, m, v) S390_RIE_2(c, 0xec65, r1, r2, m, v)
+#define s390_clgrb(c, r1, r2, m3, b, d) S390_RRS(c, 0xece5, r1, r2, m3, b, d)
+#define s390_clib(c, r, i, m, b, d) S390_RIS(c, 0xecff, r, i, m, b, d)
+#define s390_clij(c, r, i, m, d) S390_RIE_3(c, 0xec7f, r, i, m, d)
+#define s390_clr(c, r1, r2) S390_RR(c, 0x15, r1, r2)
+#define s390_clrb(c, r1, r2, m3, b, d) S390_RRS(c, 0xecf7, r1, r2, m3, b, d)
+#define s390_clrj(c, r1, r2, m, v) S390_RIE_2(c, 0xec77, r1, r2, m, v)
+#define s390_cr(c, r1, r2) S390_RR(c, 0x19, r1, r2)
+#define s390_crb(c, r1, r2, m3, b, d) S390_RRS(c, 0xecf6, r1, r2, m3, b, d)
+#define s390_crj(c, r1, r2, m3, v) S390_RIE_2(c, 0xec76, r1, r2, m3, v)
+#define s390_crl(c, r, v) S390_RIL_1(c, 0xc6d, r, v)
+#define s390_crt(c, r1, r2, m3) S390_RRF_2(c, 0xb972, r1, m3, r2)
+#define s390_cgrt(c, r1, r2, m3) S390_RRF_2(c, 0xb960, r1, m3, r2)
+#define s390_cs(c, r1, r2, b, d) S390_RX(c, 0xba, r1, r2, b, d)
+#define s390_csg(c, r1, r2, b, d) S390_RSY_1(c, 0xeb30, r1, r2, b, d)
+#define s390_csst(c, d1, b1, d2, b2, r) S390_SSF(c, 0xc82, r, b1, d1, b2, d2)
+#define s390_csy(c, r1, r2, b, d) S390_RSY_1(c, 0xeb14, r1, r2, b, d)
+#define s390_ddbr(c, r1, r2) S390_RRE(c, 0xb31d, r1, r2)
+#define s390_debr(c, r1, r2) S390_RRE(c, 0xb30d, r1, r2)
+#define s390_didbr(c, r1, r2, m, r3) S390_RRF_3(c, 0xb35b, r1, r2, m, r3)
+#define s390_dlgr(c, r1, r2) S390_RRE(c, 0xb987, r1, r2)
+#define s390_dlr(c, r1, r2) S390_RRE(c, 0xb997, r1, r2)
+#define s390_dr(c, r1, r2) S390_RR(c, 0x1d, r1, r2)
+#define s390_dsgfr(c, r1, r2) S390_RRE(c, 0xb91d, r1, r2)
+#define s390_dsgr(c, r1, r2) S390_RRE(c, 0xb90d, r1, r2)
+#define s390_ear(c, r1, r2) S390_RRE(c, 0xb24f, r1, r2)
+#define s390_ic(c, r, x, b, d) S390_RX(c, 0x43, r, x, b, d)
+#define s390_icm(c, r, m, b, d) S390_RX(c, 0xbf, r, m, b, d)
+#define s390_icmy(c, r, x, b, d) S390_RXY(c, 0xeb81, r, x, b, d)
+#define s390_icy(c, r, x, b, d) S390_RXY(c, 0xe373, r, x, b, d)
+#define s390_iihf(c, r, v) S390_RIL_1(c, 0xc08, r, v)
+#define s390_iihh(c, r, v) S390_RI(c, 0xa50, r, v)
+#define s390_iihl(c, r, v) S390_RI(c, 0xa51, r, v)
+#define s390_iilf(c, r, v) S390_RIL_1(c, 0xc09, r, v)
+#define s390_iilh(c, r, v) S390_RI(c, 0xa52, r, v)
+#define s390_iill(c, r, v) S390_RI(c, 0xa53, r, v)
+#define s390_j(c,d) s390_brc(c, S390_CC_UN, d)
+#define s390_jc(c, m, d) s390_brc(c, m, d)
+#define s390_jcl(c, m, d) s390_brcl(c, m, d)
+#define s390_jcy(c, d) s390_brc(c, S390_CC_CY, d)
+#define s390_je(c, d) s390_brc(c, S390_CC_EQ, d)
+#define s390_jeo(c, d) s390_brc(c, S390_CC_ZR|S390_CC_OV, d)
+#define s390_jh(c, d) s390_brc(c, S390_CC_GT, d)
+#define s390_jho(c, d) s390_brc(c, S390_CC_GT|S390_CC_OV, d)
+#define s390_jl(c, d) s390_brc(c, S390_CC_LT, d)
+#define s390_jlo(c, d) s390_brc(c, S390_CC_LT|S390_CC_OV, d)
+#define s390_jm(c, d) s390_brc(c, S390_CC_LT, d)
+#define s390_jnc(c, d) s390_brc(c, S390_CC_NC, d)
+#define s390_jne(c, d) s390_brc(c, S390_CC_NZ, d)
+#define s390_jnh(c, d) s390_brc(c, S390_CC_LE, d)
+#define s390_jnl(c, d) s390_brc(c, S390_CC_GE, d)
+#define s390_jnz(c, d) s390_brc(c, S390_CC_NZ, d)
+#define s390_jo(c, d) s390_brc(c, S390_CC_OV, d)
+#define s390_jno(c, d) s390_brc(c, S390_CC_NO, d)
+#define s390_jp(c, d) s390_brc(c, S390_CC_GT, d)
+#define s390_jz(c, d) s390_brc(c, S390_CC_ZR, d)
+#define s390_jg(c,d) s390_brcl(c, S390_CC_UN, d)
+#define s390_jgcy(c, d) s390_brcl(c, S390_CC_CY, d)
+#define s390_jge(c, d) s390_brcl(c, S390_CC_EQ, d)
+#define s390_jgeo(c, d) s390_brcl(c, S390_CC_ZR|S390_CC_OV, d)
+#define s390_jgh(c, d) s390_brcl(c, S390_CC_GT, d)
+#define s390_jgho(c, d) s390_brcl(c, S390_CC_GT|S390_CC_OV, d)
+#define s390_jgl(c, d) s390_brcl(c, S390_CC_LT, d)
+#define s390_jglo(c, d) s390_brcl(c, S390_CC_LT|S390_CC_OV, d)
+#define s390_jgm(c, d) s390_brcl(c, S390_CC_LT, d)
+#define s390_jgnc(c, d) s390_brcl(c, S390_CC_NC, d)
+#define s390_jgne(c, d) s390_brcl(c, S390_CC_NZ, d)
+#define s390_jgnh(c, d) s390_brcl(c, S390_CC_LE, d)
+#define s390_jgnl(c, d) s390_brcl(c, S390_CC_GE, d)
+#define s390_jgnz(c, d) s390_brcl(c, S390_CC_NZ, d)
+#define s390_jgo(c, d) s390_brcl(c, S390_CC_OV, d)
+#define s390_jgno(c, d) s390_brcl(c, S390_CC_NO, d)
+#define s390_jgp(c, d) s390_brcl(c, S390_CC_GT, d)
+#define s390_jgz(c, d) s390_brcl(c, S390_CC_ZR, d)
+#define s390_l(c, r, x, b, d) S390_RX(c, 0x58, r, x, b, d)
+#define s390_ly(c, r, x, b, d) S390_RXY(c, 0xe358, r, x, b, d)
+#define s390_la(c, r, x, b, d) S390_RX(c, 0x41, r, x, b, d)
+#define s390_lay(c, r, x, b, d) S390_RXY(c, 0xe371, r, x, b, d)
+#define s390_lam(c, r1, r2, b, d) S390_RS_1(c, 0x9a, r1, r2, b, d)
+#define s390_larl(c, r, o) S390_RIL_1(c, 0xc00, r, o)
+#define s390_lb(c, r, x, b, d) S390_RXY(c, 0xe376, r, x, b, d)
+#define s390_lbr(c, r1, r2) S390_RRE(c, 0xb926, r1, r2)
+#define s390_lcdbr(c, r1, r2) S390_RRE(c, 0xb313, r1, r2)
+#define s390_lcgr(c, r1, r2) S390_RRE(c, 0xb903, r1, r2)
+#define s390_lcr(c, r1, r2) S390_RR(c, 0x13, r1, r2)
+#define s390_ld(c, f, x, b, d) S390_RX(c, 0x68, f, x, b, d)
+#define s390_ldy(c, r, x, b, d) S390_RXY(c, 0xed65, r, x, b, d)
+#define s390_ldeb(c, r, x, b, d) S390_RXE(c, 0xed04, r, x, b, d)
+#define s390_ldebr(c, r1, r2) S390_RRE(c, 0xb304, r1, r2)
+#define s390_ldgr(c, r1, r2) S390_RRE(c, 0xb3c1, r1, r2)
+#define s390_ldr(c, r1, r2) S390_RR(c, 0x28, r1, r2)
+#define s390_le(c, f, x, b, d) S390_RX(c, 0x78, f, x, b, d)
+#define s390_ledbr(c, r1, r2) S390_RRE(c, 0xb344, r1, r2)
+#define s390_ler(c, r1, r2) S390_RR(c, 0x38, r1, r2)
+#define s390_ley(c, r, x, b, d) S390_RXY(c, 0xed64, r, x, b, d)
+#define s390_lg(c, r, x, b, d) S390_RXY(c, 0xe304, r, x, b, d)
+#define s390_lgb(c, r, x, b, d) S390_RXY(c, 0xe377, r, x, b, d)
+#define s390_lgbr(c, r1, r2) S390_RRE(c, 0xb906, r1, r2)
+#define s390_lgdr(c, r1, r2) S390_RRE(c, 0xb3cd, r1, r2)
+#define s390_lgf(c, r, x, b, d) S390_RXY(c, 0xe314, r, x, b, d)
+#define s390_lgfi(c, r, v) S390_RIL_1(c, 0xc01, r, v)
+#define s390_lgfrl(c, r1, d) S390_RIL_1(c, 0xc4c, r1, d)
+#define s390_lgfr(c, r1, r2) S390_RRE(c, 0xb914, r1, r2)
+#define s390_lgh(c, r, x, b, d) S390_RXY(c, 0xe315, r, x, b, d)
+#define s390_lghi(c, r, v) S390_RI(c, 0xa79, r, v)
+#define s390_lghr(c, r1, r2) S390_RRE(c, 0xb907, r1, r2)
+#define s390_lgr(c, r1, r2) S390_RRE(c, 0xb904, r1, r2)
+#define s390_lgrl(c, r1, d) S390_RIL_1(c, 0xc48, r1, d)
+#define s390_lh(c, r, x, b, d) S390_RX(c, 0x48, r, x, b, d)
+#define s390_lhr(c, r1, r2) S390_RRE(c, 0xb927, r1, r2)
+#define s390_lhg(c, r, x, b, d) S390_RXY(c, 0xe315, r, x, b, d)
+#define s390_lhi(c, r, v) S390_RI(c, 0xa78, r, v)
+#define s390_lhy(c, r, x, b, d) S390_RXY(c, 0xe378, r, x, b, d)
+#define s390_llcr(c, r1, r2) S390_RRE(c, 0xb994, r1, r2)
+#define s390_llgc(c, r, x, b, d) S390_RXY(c, 0xe390, r, x, b, d)
+#define s390_llgcr(c, r1, r2) S390_RRE(c, 0xb984, r1, r2)
+#define s390_llgf(c, r, x, b, d) S390_RXY(c, 0xe316, r, x, b, d)
+#define s390_llgfr(c, r1, r2) S390_RRE(c, 0xb916, r1, r2)
+#define s390_llgh(c, r, x, b, d) S390_RXY(c, 0xe391, r, x, b, d)
+#define s390_llghr(c, r1, r2) S390_RRE(c, 0xb985, r1, r2)
+#define s390_llhr(c, r1, r2) S390_RRE(c, 0xb995, r1, r2)
+#define s390_llihf(c, r, v) S390_RIL_1(c, 0xc0e, r, v)
+#define s390_llihh(c, r, v) S390_RI(c, 0xa5c, r, v)
+#define s390_llihl(c, r, v) S390_RI(c, 0xa5d, r, v)
+#define s390_llilf(c, r, v) S390_RIL_1(c, 0xc0f, r, v)
+#define s390_llilh(c, r, v) S390_RI(c, 0xa5e, r, v)
+#define s390_llill(c, r, v) S390_RI(c, 0xa5f, r, v)
+#define s390_lm(c, r1, r2, b, d) S390_RS_1(c, 0x98, r1, r2, b, d)
+#define s390_lmg(c, r1, r2, b, d) S390_RSY_1(c, 0xeb04, r1, r2, b, d)
+#define s390_lndbr(c, r1, r2) S390_RRE(c, 0xb311, r1, r2)
+#define s390_lngr(c, r1, r2) S390_RRE(c, 0xb901, r1, r2)
+#define s390_lnr(c, r1, r2) S390_RR(c, 0x11, r1, r2)
+#define s390_lpdbr(c, r1, r2) S390_RRE(c, 0xb310, r1, r2)
+#define s390_lpgr(c, r1, r2) S390_RRE(c, 0xb900, r1, r2)
+#define s390_lpr(c, r1, r2) S390_RR(c, 0x10, r1, r2)
+#define s390_lr(c, r1, r2) S390_RR(c, 0x18, r1, r2)
+#define s390_lrl(c, r1, d) S390_RIL_1(c, 0xc4d, r1, d)
+#define s390_ltgfr(c, r1, r2) S390_RRE(c, 0xb912, r1, r2)
+#define s390_ltgr(c, r1, r2) S390_RRE(c, 0xb902, r1, r2)
+#define s390_ltr(c, r1, r2) S390_RR(c, 0x12, r1, r2)
+#define s390_lzdr(c, r) S390_RRE(c, 0xb375, r, 0)
+#define s390_lzer(c, r) S390_RRE(c, 0xb374, r, 0)
+#define s390_m(c, r, x, b, d) S390_RX(c, 0x5c, r, x, b, d)
+#define s390_mdbr(c, r1, r2) S390_RRE(c, 0xb31c, r1, r2)
+#define s390_meebr(c, r1, r2) S390_RRE(c, 0xb317, r1, r2)
+#define s390_mfy(c, r, x, b, d) S390_RXY(c, 0xe35c, r, x, b, d)
+#define s390_mlgr(c, r1, r2) S390_RRE(c, 0xb986, r1, r2)
+#define s390_mlr(c, r1, r2) S390_RRE(c, 0xb996, r1, r2)
+#define s390_mr(c, r1, r2) S390_RR(c, 0x1c, r1, r2)
+#define s390_ms(c, r, x, b, d) S390_RX(c, 0x71, r, x, b, d)
+#define s390_msi(c, r, v) S390_RIL_1(c, 0xc21, r, v)
+#define s390_msgfr(c, r1, r2) S390_RRE(c, 0xb91c, r1, r2)
+#define s390_msgi(c, r, v) S390_RIL_1(c, 0xc20, r, v)
+#define s390_msgr(c, r1, r2) S390_RRE(c, 0xb90c, r1, r2)
+#define s390_msr(c, r1, r2) S390_RRE(c, 0xb252, r1, r2)
+#define s390_mvc(c, l, b1, d1, b2, d2) S390_SS_1(c, 0xd2, l, b1, d1, b2, d2)
+#define s390_mvcl(c, r1, r2) S390_RR(c, 0x0e, r1, r2)
+#define s390_mvcle(c, r1, r3, d2, b2) S390_RS_1(c, 0xa8, r1, r3, d2, b2)
+#define s390_n(c, r, x, b, d) S390_RX(c, 0x54, r, x, b, d)
+#define s390_nc(c, l, b1, d1, b2, d2) S390_SS_1(c, 0xd4, l, b1, d1, b2, d2)
+#define s390_ng(c, r, x, b, d) S390_RXY(c, 0xe380, r, x, b, d)
+#define s390_ngr(c, r1, r2) S390_RRE(c, 0xb980, r1, r2)
+#define s390_ngrk(c, r1, r2, r3) S390_RRF_1(c, 0xb9e4, r1, r2, r3)
+#define s390_ni(c, b, d, v) S390_SI(c, 0x94, b, d, v)
+#define s390_nihf(c, r, v) S390_RIL_1(c, 0xc0a, r, v)
+#define s390_nihh(c, r, v) S390_RI(c, 0xa54, r, v)
+#define s390_nihl(c, r, v) S390_RI(c, 0xa55, r, v)
+#define s390_nilf(c, r, v) S390_RIL_1(c, 0xc0b, r, v)
+#define s390_nilh(c, r, v) S390_RI(c, 0xa56, r, v)
+#define s390_nill(c, r, v) S390_RI(c, 0xa57, r, v)
+#define s390_niy(c, b, d, v) S390_SIY(c, 0xeb54, b, d, v)
+#define s390_nop(c) S390_RR(c, 0x07, 0x0, 0)
+#define s390_nr(c, r1, r2) S390_RR(c, 0x14, r1, r2)
+#define s390_nrk(c, r1, r2, r3) S390_RRF_1(c, 0xb9f4, r1, r2, r3)
+#define s390_ny(c, r, x, b, d) S390_RXY(c, 0xe354, r, x, b, d)
+#define s390_o(c, r, x, b, d) S390_RX(c, 0x56, r, x, b, d)
+#define s390_oihf(c, r, v) S390_RIL_1(c, 0xc0c, r, v)
+#define s390_oihh(c, r, v) S390_RI(c, 0xa58, r, v)
+#define s390_oihl(c, r, v) S390_RI(c, 0xa59, r, v)
+#define s390_oilf(c, r, v) S390_RIL_1(c, 0xc0d, r, v)
+#define s390_oilh(c, r, v) S390_RI(c, 0xa5a, r, v)
+#define s390_oill(c, r, v) S390_RI(c, 0xa5b, r, v)
+#define s390_oiy(c, b, d, v) S390_SIY(c, 0xeb56, b, d, v)
+#define s390_og(c, r, x, b, d) S390_RXY(c, 0xe381, r, x, b, d)
+#define s390_ogr(c, r1, r2) S390_RRE(c, 0xb981, r1, r2)
+#define s390_or(c, r1, r2) S390_RR(c, 0x16, r1, r2)
+#define s390_s(c, r, x, b, d) S390_RX(c, 0x5b, r, x, b, d)
+#define s390_sdb(c, r, x, b, d) S390_RXE(c, 0xed1b, r, x, b, d)
+#define s390_sdbr(c, r1, r2) S390_RRE(c, 0xb31b, r1, r2)
+#define s390_sebr(c, r1, r2) S390_RRE(c, 0xb30b, r1, r2)
+#define s390_sg(c, r, x, b, d) S390_RXY(c, 0xe309, r, x, b, d)
+#define s390_sgf(c, r, x, b, d) S390_RXY(c, 0xe319, r, x, b, d)
+#define s390_sgr(c, r1, r2) S390_RRE(c, 0xb909, r1, r2)
+#define s390_sl(c, r, x, b, d) S390_RX(c, 0x5f, r, x, b, d)
+#define s390_sla(c, r, b, d) S390_RS_3(c, 0x8b, r, b, d)
+#define s390_slag(c, r1, r2, b, d) S390_RSY_1(c, 0xeb0b, r1, r2, b, d)
+#define s390_slbg(c, r, x, b, d) S390_RXY(c, 0xe389, r, x, b, d)
+#define s390_slbgr(c, r1, r2) S390_RRE(c, 0xb989, r1, r2)
+#define s390_slbr(c, r1, r2) S390_RRE(c, 0xb999, r1, r2)
+#define s390_slda(c, r, b, d) S390_RS_3(c, 0x8f, r, b, d)
+#define s390_sldl(c, r, b, d) S390_RS_3(c, 0x8d, r, b, d)
+#define s390_slfi(c, r, v) S390_RIL_1(c, 0xc25, r, v)
+#define s390_slg(c, r, x, b, d) S390_RXY(c, 0xe30b, r, x, b, d)
+#define s390_slgf(c, r, x, b, d) S390_RXY(c, 0xe31b, r, x, b, d)
+#define s390_slgfr(c, r1, r2) S390_RRE(c, 0xb91b, r1, r2)
+#define s390_slgfi(c, r, v) S390_RIL_1(c, 0xc24, r, v)
+#define s390_slgr(c, r1, r2) S390_RRE(c, 0xb90b, r1, r2)
+#define s390_sll(c, r, b, d) S390_RS_3(c, 0x89, r, b, d)
+#define s390_sllg(c, r1, r2, b, d) S390_RSY_1(c, 0xeb0d, r1, r2, b, d)
+#define s390_slr(c, r1, r2) S390_RR(c, 0x1f, r1, r2)
+#define s390_sqdbr(c, r1, r2) S390_RRE(c, 0xb315, r1, r2)
+#define s390_sqebr(c, r1, r2) S390_RRE(c, 0xb314, r1, r2)
+#define s390_sra(c, r, b, d) S390_RS_3(c, 0x8a, r, b, d)
+#define s390_srag(c, r1, r2, b, d) S390_RSY_1(c, 0xeb0a, r1, r2, b, d)
+#define s390_sr(c, r1, r2) S390_RR(c, 0x1b, r1, r2)
+#define s390_srda(c, r, b, d) S390_RS_3(c, 0x8e, r, b, d)
+#define s390_srdl(c, r, b, d) S390_RS_3(c, 0x8c, r, b, d)
+#define s390_srl(c, r, b, d) S390_RS_3(c, 0x88, r, b, d)
+#define s390_srlg(c, r1, r2, b, d) S390_RSY_1(c, 0xeb0c, r1, r2, b, d)
+#define s390_st(c, r, x, b, d) S390_RX(c, 0x50, r, x, b, d)
+#define s390_stam(c, r1, r2, b, d) S390_RS_1(c, 0x9b, r1, r2, b, d)
+#define s390_stc(c, r, x, b, d) S390_RX(c, 0x42, r, x, b, d)
+#define s390_stcm(c, r, m, b, d) S390_RX(c, 0xbe, r, m, b, d)
+#define s390_stcmy(c, r, x, b, d) S390_RXY(c, 0xeb2d, r, x, b, d)
+#define s390_stcy(c, r, x, b, d) S390_RXY(c, 0xe372, r, x, b, d)
+#define s390_std(c, f, x, b, d) S390_RX(c, 0x60, f, x, b, d)
+#define s390_stdy(c, r, x, b, d) S390_RXY(c, 0xed67, r, x, b, d)
+#define s390_ste(c, f, x, b, d) S390_RX(c, 0x70, f, x, b, d)
+#define s390_stey(c, r, x, b, d) S390_RXY(c, 0xed66, r, x, b, d)
+#define s390_stfpc(c, b, d) S390_S(c, 0xb29c, b, d)
+#define s390_stg(c, r, x, b, d) S390_RXY(c, 0xe324, r, x, b, d)
+#define s390_sth(c, r, x, b, d) S390_RX(c, 0x40, r, x, b, d)
+#define s390_sthy(c, r, x, b, d) S390_RXY(c, 0xe370, r, x, b, d)
+#define s390_stm(c, r1, r2, b, d) S390_RS_1(c, 0x90, r1, r2, b, d)
+#define s390_stmg(c, r1, r2, b, d) S390_RSY_1(c, 0xeb24, r1, r2, b, d)
+#define s390_sty(c, r, x, b, d) S390_RXY(c, 0xe350, r, x, b, d)
+#define s390_tcdb(c, r, x, b, d) S390_RXE(c, 0xed11, r, x, b, d)
+#define s390_tceb(c, r, x, b, d) S390_RXE(c, 0xed10, r, x, b, d)
+#define s390_x(c, r, x, b, d) S390_RX(c, 0x57, r, x, b, d)
+#define s390_xihf(c, r, v) S390_RIL_1(c, 0xc06, r, v)
+#define s390_xilf(c, r, v) S390_RIL_1(c, 0xc07, r, v)
+#define s390_xg(c, r, x, b, d) S390_RXY(c, 0xe382, r, x, b, d)
+#define s390_xgr(c, r1, r2) S390_RRE(c, 0xb982, r1, r2)
+#define s390_xr(c, r1, r2) S390_RR(c, 0x17, r1, r2)
+#define s390_xy(c, r, x, b, d) S390_RXY(c, 0xe357, r, x, b, d)
+#endif
diff --git a/lib/ffts/src/arch/s390x/tramp.c b/lib/ffts/src/arch/s390x/tramp.c
new file mode 100644
index 0000000..fe9f310
--- /dev/null
+++ b/lib/ffts/src/arch/s390x/tramp.c
@@ -0,0 +1,1149 @@
+/*------------------------------------------------------------------*/
+/* */
+/* Name - tramp.c */
+/* */
+/* Function - Create trampolines to invoke arbitrary functions. */
+/* */
+/* Author - Neale Ferguson. */
+/* */
+/* Date - October, 2002 */
+/* */
+/* */
+/*------------------------------------------------------------------*/
+
+/*------------------------------------------------------------------*/
+/* D e f i n e s */
+/*------------------------------------------------------------------*/
+
+#define PROLOG_INS 24 /* Size of emitted prolog */
+#define CALL_INS 4 /* Size of emitted call */
+#define EPILOG_INS 18 /* Size of emitted epilog */
+
+#define DEBUG(x)
+
+/*========================= End of Defines =========================*/
+
+/*------------------------------------------------------------------*/
+/* I n c l u d e s */
+/*------------------------------------------------------------------*/
+
+#ifdef NEED_MPROTECT
+# include <sys/mman.h>
+# include <limits.h> /* for PAGESIZE */
+# ifndef PAGESIZE
+# define PAGESIZE 4096
+# endif
+#endif
+
+#include "config.h"
+#include <stdlib.h>
+#include <string.h>
+#include "s390x-codegen.h"
+#include "mono/metadata/class.h"
+#include "mono/metadata/tabledefs.h"
+#include "mono/interpreter/interp.h"
+#include "mono/metadata/appdomain.h"
+#include "mono/metadata/marshal.h"
+
+/*========================= End of Includes ========================*/
+
+/*------------------------------------------------------------------*/
+/* T y p e d e f s */
+/*------------------------------------------------------------------*/
+
+/*------------------------------------------------------------------*/
+/* Structure used to accumulate size of stack, code, and locals */
+/*------------------------------------------------------------------*/
+typedef struct {
+ guint stack_size,
+ local_size,
+ code_size,
+ retStruct;
+} size_data;
+
+/*========================= End of Typedefs ========================*/
+
+/*------------------------------------------------------------------*/
+/* */
+/* Name - add_general */
+/* */
+/* Function - Determine code and stack size increments for a */
+/* parameter. */
+/* */
+/*------------------------------------------------------------------*/
+
+static inline void
+add_general (guint *gr, size_data *sz, gboolean simple)
+{
+ if (simple) {
+ if (*gr >= GENERAL_REGS) {
+ sz->stack_size += sizeof(long);
+ sz->code_size += 12;
+ } else {
+ sz->code_size += 8;
+ }
+ } else {
+ if (*gr >= GENERAL_REGS - 1) {
+ sz->stack_size += 8 + (sz->stack_size % 8);
+ sz->code_size += 10;
+ } else {
+ sz->code_size += 8;
+ }
+ (*gr) ++;
+ }
+ (*gr) ++;
+}
+
+/*========================= End of Function ========================*/
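+
+/*------------------------------------------------------------------*/
+/* Worked example (illustrative): calculate_sizes() below starts    */
+/* with gr = 2 and GENERAL_REGS = 5.  For a signature               */
+/* (int, long long, int):                                           */
+/*   - the first int fits a register: gr 2 -> 3;                    */
+/*   - the long long needs an even/odd pair: gr 3 -> 5;             */
+/*   - the second int finds gr == GENERAL_REGS, so it goes to the   */
+/*     stack and stack_size grows by sizeof(long).                  */
+/*------------------------------------------------------------------*/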
+
+/*------------------------------------------------------------------*/
+/* */
+/* Name - calculate_sizes */
+/* */
+/* Function - Determine the amount of space required for code */
+/* and stack. In addition determine starting points */
+/* for stack-based parameters, and area for struct- */
+/* ures being returned on the stack. */
+/* */
+/*------------------------------------------------------------------*/
+
+static inline void
+calculate_sizes (MonoMethodSignature *sig, size_data *sz,
+ gboolean string_ctor)
+{
+ guint i, fr, gr, size;
+ guint32 simpletype, align;
+
+ fr = 0;
+ gr = 2;
+ sz->retStruct = 0;
+ sz->stack_size = S390_MINIMAL_STACK_SIZE;
+ sz->code_size = (PROLOG_INS + CALL_INS + EPILOG_INS);
+ sz->local_size = 0;
+
+ if (sig->hasthis) {
+ add_general (&gr, sz, TRUE);
+ }
+
+ /*----------------------------------------------------------*/
+ /* We determine the size of the return code/stack in case we*/
+ /* need to reserve a register to be used to address a stack */
+ /* area that the callee will use. */
+ /*----------------------------------------------------------*/
+
+ if (sig->ret->byref || string_ctor) {
+ sz->code_size += 8;
+ } else {
+ simpletype = sig->ret->type;
+enum_retvalue:
+ switch (simpletype) {
+ case MONO_TYPE_BOOLEAN:
+ case MONO_TYPE_I1:
+ case MONO_TYPE_U1:
+ case MONO_TYPE_I2:
+ case MONO_TYPE_U2:
+ case MONO_TYPE_CHAR:
+ case MONO_TYPE_I4:
+ case MONO_TYPE_U4:
+ case MONO_TYPE_I:
+ case MONO_TYPE_U:
+ case MONO_TYPE_CLASS:
+ case MONO_TYPE_OBJECT:
+ case MONO_TYPE_R4:
+ case MONO_TYPE_R8:
+ case MONO_TYPE_PTR:
+ case MONO_TYPE_SZARRAY:
+ case MONO_TYPE_ARRAY:
+ case MONO_TYPE_STRING:
+ sz->code_size += 4;
+ break;
+ case MONO_TYPE_I8:
+ sz->code_size += 4;
+ break;
+ case MONO_TYPE_VALUETYPE:
+ if (sig->ret->data.klass->enumtype) {
+ simpletype = sig->ret->data.klass->enum_basetype->type;
+ goto enum_retvalue;
+ }
+ gr++;
+ if (sig->pinvoke)
+ size = mono_class_native_size (sig->ret->data.klass, &align);
+ else
+ size = mono_class_value_size (sig->ret->data.klass, &align);
+ if (align > 1)
+ sz->code_size += 10;
+ switch (size) {
+ /*----------------------------------*/
+ /* On S/390, structures of size 1, */
+ /* 2, 4, and 8 bytes are returned */
+ /* in (a) register(s). */
+ /*----------------------------------*/
+ case 1:
+ case 2:
+ case 4:
+ case 8:
+ sz->code_size += 16;
+ sz->stack_size += 4;
+ break;
+ default:
+ sz->retStruct = 1;
+ sz->code_size += 32;
+ }
+ break;
+ case MONO_TYPE_VOID:
+ break;
+ default:
+ g_error ("Can't handle as return value 0x%x", sig->ret->type);
+ }
+ }
+
+ /*----------------------------------------------------------*/
+ /* We determine the size of the parameter code and stack */
+ /* requirements by checking the types and sizes of the */
+ /* parameters. */
+ /*----------------------------------------------------------*/
+
+ for (i = 0; i < sig->param_count; ++i) {
+ if (sig->params [i]->byref) {
+ add_general (&gr, sz, TRUE);
+ continue;
+ }
+ simpletype = sig->params [i]->type;
+ enum_calc_size:
+ switch (simpletype) {
+ case MONO_TYPE_BOOLEAN:
+ case MONO_TYPE_CHAR:
+ case MONO_TYPE_I1:
+ case MONO_TYPE_U1:
+ case MONO_TYPE_I2:
+ case MONO_TYPE_U2:
+ case MONO_TYPE_I4:
+ case MONO_TYPE_U4:
+ case MONO_TYPE_I:
+ case MONO_TYPE_U:
+ case MONO_TYPE_PTR:
+ case MONO_TYPE_CLASS:
+ case MONO_TYPE_OBJECT:
+ case MONO_TYPE_STRING:
+ add_general (&gr, sz, TRUE);
+ break;
+ case MONO_TYPE_SZARRAY:
+ add_general (&gr, sz, TRUE);
+ break;
+ case MONO_TYPE_VALUETYPE:
+ if (sig->params [i]->data.klass->enumtype) {
+ simpletype = sig->params [i]->data.klass->enum_basetype->type;
+ goto enum_calc_size;
+ }
+ if (sig->pinvoke)
+ size = mono_class_native_size (sig->params [i]->data.klass, &align);
+ else
+ size = mono_class_value_size (sig->params [i]->data.klass, &align);
+ DEBUG(printf("%d typesize: %d (%d)\n",i,size,align));
+ switch (size) {
+ /*----------------------------------*/
+ /* On S/390, structures of size 1, */
+ /* 2, 4, and 8 bytes are passed in */
+ /* (a) register(s). */
+ /*----------------------------------*/
+ case 0:
+ case 1:
+ case 2:
+ case 4:
+ add_general(&gr, sz, TRUE);
+ break;
+ case 8:
+ add_general(&gr, sz, FALSE);
+ break;
+ default:
+ sz->local_size += (size + (size % align));
+ sz->code_size += 40;
+ }
+ break;
+ case MONO_TYPE_I8:
+ add_general (&gr, sz, FALSE);
+ break;
+ case MONO_TYPE_R4:
+ if (fr < FLOAT_REGS) {
+ sz->code_size += 4;
+ fr++;
+ }
+ else {
+ sz->code_size += 4;
+ sz->stack_size += 8;
+ }
+ break;
+ case MONO_TYPE_R8:
+ if (fr < FLOAT_REGS) {
+ sz->code_size += 4;
+ fr++;
+ } else {
+ sz->code_size += 4;
+ sz->stack_size += 8 + (sz->stack_size % 8);
+ }
+ break;
+ default:
+ g_error ("Can't trampoline 0x%x", sig->params [i]->type);
+ }
+ }
+
+
+ /* align stack size to 8 */
+ DEBUG (printf (" stack size: %d (%d)\n"
+ " code size: %d\n"
+ " local size: %d\n",
+		      (sz->stack_size + 7) & ~7, sz->stack_size,
+		      (sz->code_size),(sz->local_size + 7) & ~7));
+	sz->stack_size = (sz->stack_size + 7) & ~7;
+	sz->local_size = (sz->local_size + 7) & ~7;
+}
+
+/*========================= End of Function ========================*/
+
+/*------------------------------------------------------------------*/
+/* */
+/* Name - emit_prolog */
+/* */
+/* Function - Create the instructions that implement the stand- */
+/* ard function prolog according to the S/390 ABI. */
+/* */
+/*------------------------------------------------------------------*/
+
+static inline guint8 *
+emit_prolog (guint8 *p, MonoMethodSignature *sig, size_data *sz)
+{
+ guint stack_size;
+
+ stack_size = sz->stack_size + sz->local_size;
+
+ /* function prolog */
+ s390_stmg(p, s390_r6, s390_r14, STK_BASE, S390_REG_SAVE_OFFSET);
+ s390_lg (p, s390_r7, 0, STK_BASE, MINV_POS);
+ s390_lgr (p, s390_r11, STK_BASE);
+ s390_aghi(p, STK_BASE, -stack_size);
+ s390_stg (p, s390_r11, 0, STK_BASE, 0);
+
+ /*-----------------------------------------*/
+ /* Save: */
+ /* - address of "callme" */
+ /* - address of "retval" */
+ /* - address of "arguments" */
+ /*-----------------------------------------*/
+ s390_lgr (p, s390_r9, s390_r2);
+ s390_lgr (p, s390_r8, s390_r3);
+ s390_lgr (p, s390_r10, s390_r5);
+
+ return p;
+}
+
+/*========================= End of Function ========================*/
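+
+/*------------------------------------------------------------------*/
+/* The prolog above corresponds roughly to this s390x assembly      */
+/* (a sketch; stack_size is the computed frame size):               */
+/*                                                                  */
+/*     stmg  %r6,%r14,48(%r15)     save callee-saved registers      */
+/*     lg    %r7,160(%r15)         pick up the MINV_POS slot        */
+/*     lgr   %r11,%r15             remember the old stack pointer   */
+/*     aghi  %r15,-stack_size      allocate the frame               */
+/*     stg   %r11,0(%r15)          store the back-chain             */
+/*------------------------------------------------------------------*/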
+
+/*------------------------------------------------------------------*/
+/* */
+/* Name - emit_save_parameters */
+/* */
+/* Function - Create the instructions that load registers with */
+/* parameters, place others on the stack according */
+/* to the S/390 ABI. */
+/* */
+/* The resulting function takes the form: */
+/* void func (void (*callme)(), void *retval, */
+/* void *this_obj, stackval *arguments); */
+/* */
+/*------------------------------------------------------------------*/
+
+inline static guint8*
+emit_save_parameters (guint8 *p, MonoMethodSignature *sig, size_data *sz)
+{
+ guint i, fr, gr, act_strs, align,
+ stack_par_pos, size, local_pos;
+ guint32 simpletype;
+
+ /*----------------------------------------------------------*/
+ /* If a structure on stack is being returned, reserve r2 */
+ /* to point to an area where it can be passed. */
+ /*----------------------------------------------------------*/
+ if (sz->retStruct)
+ gr = 1;
+ else
+ gr = 0;
+ fr = 0;
+ act_strs = 0;
+ stack_par_pos = S390_MINIMAL_STACK_SIZE;
+ local_pos = sz->stack_size;
+
+ if (sig->hasthis) {
+		s390_lgr (p, s390_r2 + gr, s390_r4);
+ gr++;
+ }
+
+ act_strs = 0;
+ for (i = 0; i < sig->param_count; ++i) {
+ DEBUG(printf("par: %d type: %d ref: %d\n",i,sig->params[i]->type,sig->params[i]->byref));
+ if (sig->params [i]->byref) {
+ if (gr < GENERAL_REGS) {
+ s390_lg (p, s390_r2 + gr, 0, ARG_BASE, STKARG);
+ gr ++;
+ } else {
+ s390_lg (p, s390_r0, 0, ARG_BASE, STKARG);
+ s390_stg(p, s390_r0, 0, STK_BASE, stack_par_pos);
+ stack_par_pos += sizeof(long);
+ }
+ continue;
+ }
+ simpletype = sig->params [i]->type;
+ enum_calc_size:
+ switch (simpletype) {
+ case MONO_TYPE_BOOLEAN:
+ case MONO_TYPE_I1:
+ case MONO_TYPE_U1:
+ case MONO_TYPE_I2:
+ case MONO_TYPE_U2:
+ case MONO_TYPE_CHAR:
+ case MONO_TYPE_I4:
+ case MONO_TYPE_U4:
+ case MONO_TYPE_I:
+ case MONO_TYPE_U:
+ case MONO_TYPE_PTR:
+ case MONO_TYPE_CLASS:
+ case MONO_TYPE_OBJECT:
+ case MONO_TYPE_STRING:
+ case MONO_TYPE_SZARRAY:
+ if (gr < GENERAL_REGS) {
+ s390_lg (p, s390_r2 + gr, 0, ARG_BASE, STKARG);
+ gr ++;
+ } else {
+ s390_lg (p, s390_r0, 0, ARG_BASE, STKARG);
+ s390_stg(p, s390_r0, 0, STK_BASE, stack_par_pos);
+ stack_par_pos += sizeof(long);
+ }
+ break;
+ case MONO_TYPE_VALUETYPE:
+ if (sig->params [i]->data.klass->enumtype) {
+ simpletype = sig->params [i]->data.klass->enum_basetype->type;
+ goto enum_calc_size;
+ }
+ if (sig->pinvoke)
+ size = mono_class_native_size (sig->params [i]->data.klass, &align);
+ else
+ size = mono_class_value_size (sig->params [i]->data.klass, &align);
+ DEBUG(printf("parStruct - size %d pinvoke: %d\n",size,sig->pinvoke));
+ switch (size) {
+ case 0:
+ case 1:
+ case 2:
+ case 4:
+ if (gr < GENERAL_REGS) {
+ s390_lg (p, s390_r2 + gr, 0,ARG_BASE, STKARG);
+ s390_lgf(p, s390_r2 + gr, 0, s390_r2 + gr, 0);
+ gr++;
+ } else {
+ stack_par_pos += (stack_par_pos % align);
+ s390_lg (p, s390_r10, 0,ARG_BASE, STKARG);
+ s390_lgf(p, s390_r10, 0, s390_r10, 0);
+ s390_st (p, s390_r10, 0, STK_BASE, stack_par_pos);
+ stack_par_pos += sizeof(long);
+ }
+ break;
+ case 8:
+ if (gr < GENERAL_REGS) {
+ s390_lg (p, s390_r2 + gr, 0, ARG_BASE, STKARG);
+ s390_lg (p, s390_r2 + gr, 0, s390_r2 + gr, 0);
+ } else {
+ stack_par_pos += (stack_par_pos % align);
+ s390_lg (p, s390_r10, 0, ARG_BASE, STKARG);
+ s390_mvc (p, sizeof(long long), STK_BASE, stack_par_pos, s390_r10, 0);
+ stack_par_pos += sizeof(long long);
+ }
+ break;
+ default:
+ if (size <= 256) {
+ local_pos += (local_pos % align);
+ s390_lg (p, s390_r13, 0, ARG_BASE, STKARG);
+ s390_mvc (p, size, STK_BASE, local_pos, s390_r13, 0);
+ s390_la (p, s390_r13, 0, STK_BASE, local_pos);
+ local_pos += size;
+ } else {
+ local_pos += (local_pos % align);
+ s390_bras (p, s390_r13, 4);
+ s390_llong(p, size);
+ s390_lg (p, s390_r1, 0, s390_r13, 0);
+ s390_lg (p, s390_r0, 0, ARG_BASE, STKARG);
+ s390_lgr (p, s390_r14, s390_r12);
+ s390_la (p, s390_r12, 0, STK_BASE, local_pos);
+ s390_lgr (p, s390_r13, s390_r1);
+ s390_mvcl (p, s390_r12, s390_r0);
+ s390_lgr (p, s390_r12, s390_r14);
+ s390_la (p, s390_r13, 0, STK_BASE, local_pos);
+ local_pos += size;
+ }
+ if (gr < GENERAL_REGS) {
+ s390_lgr(p, s390_r2 + gr, s390_r13);
+ gr++;
+ } else {
+ s390_stg(p, s390_r13, 0, STK_BASE, stack_par_pos);
+ stack_par_pos += sizeof(long);
+ }
+ }
+ break;
+ case MONO_TYPE_I8:
+ if (gr < GENERAL_REGS) {
+ s390_lg (p, s390_r2 + gr, 0, ARG_BASE, STKARG);
+ gr += 2;
+ } else {
+				stack_par_pos += 7;
+				stack_par_pos &= ~7;
+ s390_mvc (p, sizeof(long long), STK_BASE, stack_par_pos, ARG_BASE, STKARG);
+ stack_par_pos += sizeof(long long) + (stack_par_pos % sizeof(long long));
+ }
+ break;
+ case MONO_TYPE_R4:
+ if (fr < FLOAT_REGS) {
+ s390_le (p, s390_r0 + fr, 0, ARG_BASE, STKARG);
+ fr++;
+ } else {
+ s390_mvc (p, sizeof(float), STK_BASE, stack_par_pos, ARG_BASE, STKARG);
+ stack_par_pos += sizeof(float);
+ }
+ break;
+ case MONO_TYPE_R8:
+ if (fr < FLOAT_REGS) {
+ s390_ld (p, s390_r0 + fr, 0, ARG_BASE, STKARG);
+ fr++;
+ } else {
+				stack_par_pos += 7;
+				stack_par_pos &= ~7;
+ s390_mvc (p, sizeof(double), STK_BASE, stack_par_pos, ARG_BASE, STKARG);
+ stack_par_pos += sizeof(long long) + (stack_par_pos % sizeof(long long));
+ }
+ break;
+ default:
+ g_error ("Can't trampoline 0x%x", sig->params [i]->type);
+ }
+ }
+
+ /*----------------------------------------------------------*/
+ /* If we're returning a structure but not in a register */
+ /* then point the result area for the called routine */
+ /*----------------------------------------------------------*/
+ if (sz->retStruct) {
+ s390_lg (p, s390_r2, 0, s390_r8, 0);
+ }
+
+ return p;
+}
+
+/*========================= End of Function ========================*/
+
+/*------------------------------------------------------------------*/
+/* */
+/* Name - alloc_code_memory */
+/* */
+/* Function - Allocate space to place the emitted code. */
+/* */
+/*------------------------------------------------------------------*/
+
+static inline guint8 *
+alloc_code_memory (guint code_size)
+{
+ guint8 *p;
+
+#ifdef NEED_MPROTECT
+ p = g_malloc (code_size + PAGESIZE - 1);
+
+ /* Align to a multiple of PAGESIZE, assumed to be a power of two */
+	p = (guint8 *)(((gulong) p + PAGESIZE - 1) & ~(PAGESIZE - 1));
+#else
+ p = g_malloc (code_size);
+#endif
+ DEBUG (printf (" align: %p (%d)\n", p, (guint)p % 4));
+
+ return p;
+}
+
+/*========================= End of Function ========================*/
+
+/*------------------------------------------------------------------*/
+/* */
+/* Name - emit_call_and_store_retval */
+/* */
+/* Function - Emit code that will implement the call to the */
+/* desired function, and unload the result according */
+/* to the S390 ABI for the type of value returned */
+/* */
+/*------------------------------------------------------------------*/
+
+static inline guint8 *
+emit_call_and_store_retval (guint8 *p, MonoMethodSignature *sig,
+ size_data *sz, gboolean string_ctor)
+{
+ guint32 simpletype;
+ guint retSize, align;
+
+ /* call "callme" */
+ s390_basr (p, s390_r14, s390_r9);
+
+ /* get return value */
+ if (sig->ret->byref || string_ctor) {
+ s390_stg(p, s390_r2, 0, s390_r8, 0);
+ } else {
+ simpletype = sig->ret->type;
+enum_retvalue:
+ switch (simpletype) {
+ case MONO_TYPE_BOOLEAN:
+ case MONO_TYPE_I1:
+ case MONO_TYPE_U1:
+ s390_stc (p, s390_r2, 0, s390_r8, 0);
+ break;
+ case MONO_TYPE_I2:
+ case MONO_TYPE_U2:
+ case MONO_TYPE_CHAR:
+ s390_sth (p, s390_r2, 0, s390_r8, 0);
+ break;
+ case MONO_TYPE_I4:
+ case MONO_TYPE_U4:
+ case MONO_TYPE_I:
+ case MONO_TYPE_U:
+ case MONO_TYPE_CLASS:
+ case MONO_TYPE_OBJECT:
+ case MONO_TYPE_SZARRAY:
+ case MONO_TYPE_ARRAY:
+ case MONO_TYPE_STRING:
+ s390_st (p, s390_r2, 0, s390_r8, 0);
+ break;
+ case MONO_TYPE_R4:
+ s390_ste (p, s390_f0, 0, s390_r8, 0);
+ break;
+ case MONO_TYPE_R8:
+ s390_std (p, s390_f0, 0, s390_r8, 0);
+ break;
+ case MONO_TYPE_I8:
+ s390_stg (p, s390_r2, 0, s390_r8, 0);
+ break;
+ case MONO_TYPE_VALUETYPE:
+ if (sig->ret->data.klass->enumtype) {
+ simpletype = sig->ret->data.klass->enum_basetype->type;
+ goto enum_retvalue;
+ }
+ if (sig->pinvoke)
+ retSize = mono_class_native_size (sig->ret->data.klass, &align);
+ else
+ retSize = mono_class_value_size (sig->ret->data.klass, &align);
+			DEBUG(printf("Returning %d bytes for type %d (%d)\n",retSize,simpletype,sig->pinvoke));
+ switch(retSize) {
+ case 0:
+ break;
+ case 1:
+ s390_stc (p, s390_r2, 0, s390_r8, 0);
+ break;
+ case 2:
+ s390_sth (p, s390_r2, 0, s390_r8, 0);
+ break;
+ case 4:
+ s390_st (p, s390_r2, 0, s390_r8, 0);
+ break;
+ case 8:
+ s390_stg (p, s390_r2, 0, s390_r8, 0);
+ break;
+ default: ;
+ /*------------------------------------------*/
+ /* The callee has already placed the result */
+ /* in the required area */
+ /*------------------------------------------*/
+ }
+ break;
+ case MONO_TYPE_VOID:
+ break;
+ default:
+ g_error ("Can't handle as return value 0x%x",
+ sig->ret->type);
+ }
+ }
+
+ return p;
+}
+
+/*========================= End of Function ========================*/
+
+/*------------------------------------------------------------------*/
+/* */
+/* Name - emit_epilog */
+/* */
+/* Function - Create the instructions that implement the stand- */
+/* ard function epilog according to the S/390 ABI. */
+/* */
+/*------------------------------------------------------------------*/
+
+static inline guint8 *
+emit_epilog (guint8 *p, MonoMethodSignature *sig, size_data *sz)
+{
+ /* function epilog */
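+	/* reload the caller's stack pointer from the back chain, pick */
+	/* up the return address, restore r6-r15 and branch back       */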
+ s390_lg (p, STK_BASE, 0, STK_BASE, 0);
+ s390_lg (p, s390_r4, 0, STK_BASE, S390_RET_ADDR_OFFSET);
+ s390_lmg (p, s390_r6, STK_BASE, STK_BASE, S390_REG_SAVE_OFFSET);
+ s390_br (p, s390_r4);
+
+ return p;
+}
+
+/*========================= End of Function ========================*/
+
+/*------------------------------------------------------------------*/
+/* */
+/* Name - mono_arch_create_trampoline. */
+/* */
+/* Function - Create the code that will allow a mono method to */
+/* invoke a system subroutine. */
+/* */
+/*------------------------------------------------------------------*/
+
+MonoPIFunc
+mono_arch_create_trampoline (MonoMethodSignature *sig, gboolean string_ctor)
+{
+ guint8 *p, *code_buffer;
+ size_data sz;
+
+	DEBUG (printf ("\nPInvoke [start emitting]\n"));
+ calculate_sizes (sig, &sz, string_ctor);
+
+ p = code_buffer = alloc_code_memory (sz.code_size);
+ p = emit_prolog (p, sig, &sz);
+ p = emit_save_parameters (p, sig, &sz);
+ p = emit_call_and_store_retval (p, sig, &sz, string_ctor);
+ p = emit_epilog (p, sig, &sz);
+
+#ifdef NEED_MPROTECT
+ if (mprotect (code_buffer, 1024, PROT_READ | PROT_WRITE | PROT_EXEC)) {
+ g_error ("Cannot mprotect trampoline\n");
+ }
+#endif
+
+	DEBUG (printf ("emitted code size: %d\n", (int) (p - code_buffer)));
+
+	DEBUG (printf ("PInvoke [end emitting]\n"));
+
+ return (MonoPIFunc) code_buffer;
+}
+
+/*========================= End of Function ========================*/
+
+/*------------------------------------------------------------------*/
+/* */
+/* Name - mono_arch_create_method_pointer */
+/* */
+/* Function - Returns a pointer to a native function that can */
+/* be used to call the specified method. */
+/* */
+/* The function created will receive the arguments */
+/*                   according to the calling convention specified */
+/*                   in the method.                                */
+/* */
+/* This function works by creating a MonoInvocation */
+/* structure, filling the fields in and calling */
+/* ves_exec_method() on it. */
+/* */
+/* Logic: */
+/* ------ */
+/* mono_arch_create_method_pointer (MonoMethod *method) */
+/* create the unmanaged->managed wrapper */
+/* register it with mono_jit_info_table_add() */
+/* */
+/* What does the unmanaged->managed wrapper do? */
+/* allocate a MonoInvocation structure (inv) on the stack */
+/* allocate an array of stackval on the stack with length = */
+/* method->signature->param_count + 1 [call it stack_args] */
+/* set inv->ex, inv->ex_handler, inv->parent to NULL */
+/* set inv->method to method */
+/* if method is an instance method, set inv->obj to the */
+/* 'this' argument (the first argument) else set to NULL */
+/* for each argument to the method call: */
+/* stackval_from_data (sig->params[i], &stack_args[i], */
+/* arg, sig->pinvoke); */
+/* Where: */
+/* ------ */
+/* sig - is method->signature */
+/* &stack_args[i] - is the pointer to the ith element */
+/* in the stackval array */
+/* arg - is a pointer to the argument re- */
+/* ceived by the function according */
+/* to the call convention. If it */
+/* gets passed in a register, save */
+/* on the stack first. */
+/* */
+/* set inv->retval to the address of the last element of */
+/* stack_args [recall we allocated param_count+1 of them] */
+/* call ves_exec_method(inv) */
+/* copy the returned value from inv->retval where the calling */
+/* convention expects to find it on return from the wrap- */
+/* per [if it's a structure, use stackval_to_data] */
+/* */
+/*------------------------------------------------------------------*/
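+
+/*------------------------------------------------------------------*/
+/* Illustrative sketch (informal, not compiled): the generated      */
+/* wrapper behaves roughly like the following C for a method with   */
+/* N parameters:                                                    */
+/*                                                                  */
+/*   MonoInvocation inv = {0};                                      */
+/*   stackval stack_args [N + 1];                                   */
+/*   inv.method = method;                                           */
+/*   inv.obj    = sig->hasthis ? this_arg : NULL;                   */
+/*   for (i = 0; i < N; i++)                                        */
+/*       stackval_from_data (sig->params [i], &stack_args [i],      */
+/*                           arg, sig->pinvoke);                    */
+/*   inv.retval = &stack_args [N];                                  */
+/*   ves_exec_method (&inv);                                        */
+/*   ... copy *inv.retval out according to the ABI ...              */
+/*------------------------------------------------------------------*/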
+
+void *
+mono_arch_create_method_pointer (MonoMethod *method)
+{
+ MonoMethodSignature *sig;
+ MonoJitInfo *ji;
+ guint8 *p, *code_buffer;
+ guint i, align = 0, simple_type, retSize, reg_save = 0,
+ stackval_arg_pos, local_pos, float_pos,
+ local_start, reg_param = 0, stack_param,
+ this_flag, arg_pos, fpr_param, parSize;
+ guint32 simpletype;
+ size_data sz;
+ int *vtbuf, cpos, vt_cur;
+
+ sz.code_size = 1024;
+ sz.stack_size = 1024;
+ stack_param = 0;
+ fpr_param = 0;
+ arg_pos = 0;
+
+ sig = method->signature;
+
+ p = code_buffer = g_malloc (sz.code_size);
+
+	DEBUG (printf ("\nDelegate [start emitting] %s at %p\n",
+		       method->name, p));
+
+ /*----------------------------------------------------------*/
+ /* prolog */
+ /*----------------------------------------------------------*/
+ s390_stmg(p, s390_r6, STK_BASE, STK_BASE, S390_REG_SAVE_OFFSET);
+ s390_lg (p, s390_r7, 0, STK_BASE, MINV_POS);
+ s390_lgr (p, s390_r0, STK_BASE);
+ s390_aghi(p, STK_BASE, -(sz.stack_size+MINV_POS));
+ s390_stg (p, s390_r0, 0, STK_BASE, 0);
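+	/* clear the new frame: r10/r11 form a zero-length source, so  */
+	/* MVCL pads the destination (r8/r9) with the pad byte in r11, */
+	/* i.e. with zeroes                                             */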
+ s390_la (p, s390_r8, 0, STK_BASE, 4);
+ s390_lgr (p, s390_r10, s390_r8);
+ s390_lghi(p, s390_r9, sz.stack_size+92);
+ s390_lghi(p, s390_r11, 0);
+ s390_mvcl(p, s390_r8, s390_r10);
+
+ /*----------------------------------------------------------*/
+ /* Let's fill MonoInvocation - first zero some fields */
+ /*----------------------------------------------------------*/
+ s390_lghi (p, s390_r0, 0);
+ s390_stg (p, s390_r0, 0, STK_BASE, (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, ex)));
+ s390_stg (p, s390_r0, 0, STK_BASE, (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, ex_handler)));
+ s390_stg (p, s390_r0, 0, STK_BASE, (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, parent)));
+ s390_lghi (p, s390_r0, 1);
+ s390_stg (p, s390_r0, 0, STK_BASE, (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, invoke_trap)));
+
+ /*----------------------------------------------------------*/
+ /* set method pointer */
+ /*----------------------------------------------------------*/
+	s390_bras (p, s390_r13, 6);		/* branch over the 8-byte literal */
+ s390_llong(p, method);
+ s390_lg (p, s390_r0, 0, s390_r13, 0);
+ s390_stg (p, s390_r0, 0, STK_BASE, (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, method)));
+
+ local_start = local_pos = MINV_POS +
+ sizeof (MonoInvocation) + (sig->param_count + 1) * sizeof (stackval);
+ this_flag = (sig->hasthis ? 1 : 0);
+
+ /*----------------------------------------------------------*/
+	/* if we are returning a structure, check its length to    */
+ /* see if there's a "hidden" parameter that points to the */
+ /* area. If necessary save this hidden parameter for later */
+ /*----------------------------------------------------------*/
+ if (MONO_TYPE_ISSTRUCT(sig->ret)) {
+ if (sig->pinvoke)
+ retSize = mono_class_native_size (sig->ret->data.klass, &align);
+ else
+ retSize = mono_class_value_size (sig->ret->data.klass, &align);
+ switch(retSize) {
+ case 0:
+ case 1:
+ case 2:
+ case 4:
+ case 8:
+ sz.retStruct = 0;
+ break;
+ default:
+ sz.retStruct = 1;
+ s390_lgr(p, s390_r8, s390_r2);
+ reg_save = 1;
+ }
+ } else {
+ reg_save = 0;
+ }
+
+ if (this_flag) {
+ s390_stg (p, s390_r2 + reg_save, 0, STK_BASE,
+ (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, obj)));
+ reg_param++;
+ } else {
+ s390_stg (p, s390_r2 + reg_save, 0, STK_BASE, local_pos);
+		local_pos += sizeof(long);
+ s390_stg (p, s390_r0, 0, STK_BASE,
+ (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, obj)));
+ }
+
+ s390_stmg (p, s390_r3 + reg_param, s390_r6, STK_BASE, local_pos);
+ local_pos += 4 * sizeof(long);
+ float_pos = local_pos;
+ s390_std (p, s390_f0, 0, STK_BASE, local_pos);
+ local_pos += sizeof(double);
+ s390_std (p, s390_f2, 0, STK_BASE, local_pos);
+ local_pos += sizeof(double);
+
+ /*----------------------------------------------------------*/
+ /* prepare space for valuetypes */
+ /*----------------------------------------------------------*/
+ vt_cur = local_pos;
+ vtbuf = alloca (sizeof(int)*sig->param_count);
+ cpos = 0;
+ for (i = 0; i < sig->param_count; i++) {
+ MonoType *type = sig->params [i];
+ vtbuf [i] = -1;
+ DEBUG(printf("par: %d type: %d ref: %d\n",i,type->type,type->byref));
+ if (type->type == MONO_TYPE_VALUETYPE) {
+ MonoClass *klass = type->data.klass;
+ gint size;
+
+ if (klass->enumtype)
+ continue;
+ size = mono_class_native_size (klass, &align);
+ cpos += align - 1;
+ cpos &= ~(align - 1);
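+			/* e.g. align 8: cpos 13 -> 16 */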
+ vtbuf [i] = cpos;
+ cpos += size;
+ }
+ }
+ cpos += 3;
+ cpos &= ~3;
+
+ local_pos += cpos;
+
+ /*----------------------------------------------------------*/
+ /* set MonoInvocation::stack_args */
+ /*----------------------------------------------------------*/
+ stackval_arg_pos = MINV_POS + sizeof (MonoInvocation);
+ s390_la (p, s390_r0, 0, STK_BASE, stackval_arg_pos);
+ s390_stg (p, s390_r0, 0, STK_BASE, (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, stack_args)));
+
+ /*----------------------------------------------------------*/
+ /* add stackval arguments */
+ /*----------------------------------------------------------*/
+ for (i = 0; i < sig->param_count; ++i) {
+ if (sig->params [i]->byref) {
+ ADD_ISTACK_PARM(0, 1);
+ } else {
+ simple_type = sig->params [i]->type;
+ enum_savechk:
+ switch (simple_type) {
+ case MONO_TYPE_I8:
+ ADD_ISTACK_PARM(-1, 2);
+ break;
+ case MONO_TYPE_R4:
+ ADD_RSTACK_PARM(1);
+ break;
+ case MONO_TYPE_R8:
+ ADD_RSTACK_PARM(2);
+ break;
+ case MONO_TYPE_VALUETYPE:
+ if (sig->params [i]->data.klass->enumtype) {
+ simple_type = sig->params [i]->data.klass->enum_basetype->type;
+ goto enum_savechk;
+ }
+ if (sig->pinvoke)
+ parSize = mono_class_native_size (sig->params [i]->data.klass, &align);
+ else
+ parSize = mono_class_value_size (sig->params [i]->data.klass, &align);
+ switch(parSize) {
+ case 0:
+ case 1:
+ case 2:
+ case 4:
+ ADD_PSTACK_PARM(0, 1);
+ break;
+ case 8:
+ ADD_PSTACK_PARM(-1, 2);
+ break;
+ default:
+ ADD_TSTACK_PARM;
+ }
+ break;
+ default:
+ ADD_ISTACK_PARM(0, 1);
+ }
+ }
+
+ if (vtbuf [i] >= 0) {
+ s390_la (p, s390_r3, 0, STK_BASE, vt_cur);
+ s390_stg (p, s390_r3, 0, STK_BASE, stackval_arg_pos);
+ s390_la (p, s390_r3, 0, STK_BASE, stackval_arg_pos);
+ vt_cur += vtbuf [i];
+ } else {
+ s390_la (p, s390_r3, 0, STK_BASE, stackval_arg_pos);
+ }
+
+ /*--------------------------------------*/
+ /* Load the parameter registers for the */
+ /* call to stackval_from_data */
+ /*--------------------------------------*/
+		s390_bras (p, s390_r13, 14);	/* branch over the 24-byte literal pool */
+ s390_llong(p, sig->params [i]);
+ s390_llong(p, sig->pinvoke);
+ s390_llong(p, stackval_from_data);
+ s390_lg (p, s390_r2, 0, s390_r13, 0);
+		s390_lg   (p, s390_r5, 0, s390_r13, 8);
+		s390_lg   (p, s390_r1, 0, s390_r13, 16);
+ s390_basr (p, s390_r14, s390_r1);
+
+ stackval_arg_pos += sizeof(stackval);
+
+ /* fixme: alignment */
+ DEBUG (printf ("arg_pos %d --> ", arg_pos));
+ if (sig->pinvoke)
+ arg_pos += mono_type_native_stack_size (sig->params [i], &align);
+ else
+ arg_pos += mono_type_stack_size (sig->params [i], &align);
+
+ DEBUG (printf ("%d\n", stackval_arg_pos));
+ }
+
+ /*----------------------------------------------------------*/
+ /* Set return area pointer. */
+ /*----------------------------------------------------------*/
+ s390_la (p, s390_r10, 0, STK_BASE, stackval_arg_pos);
+ s390_stg(p, s390_r10, 0, STK_BASE, (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, retval)));
+ if (sig->ret->type == MONO_TYPE_VALUETYPE && !sig->ret->byref) {
+ MonoClass *klass = sig->ret->data.klass;
+ if (!klass->enumtype) {
+ s390_la (p, s390_r9, 0, s390_r10, sizeof(stackval));
+			s390_stg (p, s390_r9, 0, STK_BASE, stackval_arg_pos);
+ stackval_arg_pos += sizeof(stackval);
+ }
+ }
+
+ /*----------------------------------------------------------*/
+ /* call ves_exec_method */
+ /*----------------------------------------------------------*/
+	s390_bras (p, s390_r13, 6);		/* branch over the 8-byte literal */
+ s390_llong(p, ves_exec_method);
+ s390_lg (p, s390_r1, 0, s390_r13, 0);
+ s390_la (p, s390_r2, 0, STK_BASE, MINV_POS);
+ s390_basr (p, s390_r14, s390_r1);
+
+ /*----------------------------------------------------------*/
+ /* move retval from stackval to proper place (r3/r4/...) */
+ /*----------------------------------------------------------*/
+ DEBUG(printf("retType: %d byRef: %d\n",sig->ret->type,sig->ret->byref));
+ if (sig->ret->byref) {
+ DEBUG (printf ("ret by ref\n"));
+ s390_stg(p, s390_r2, 0, s390_r10, 0);
+ } else {
+ enum_retvalue:
+ switch (sig->ret->type) {
+ case MONO_TYPE_VOID:
+ break;
+ case MONO_TYPE_BOOLEAN:
+ case MONO_TYPE_U1:
+ s390_lghi(p, s390_r2, 0);
+ s390_ic (p, s390_r2, 0, s390_r10, 0);
+ break;
+ case MONO_TYPE_I2:
+ case MONO_TYPE_U2:
+			s390_lh  (p, s390_r2, 0, s390_r10, 0);
+ break;
+ case MONO_TYPE_I4:
+ case MONO_TYPE_U4:
+ case MONO_TYPE_I:
+ case MONO_TYPE_U:
+ s390_lgf(p, s390_r2, 0, s390_r10, 0);
+ break;
+ case MONO_TYPE_OBJECT:
+ case MONO_TYPE_STRING:
+ case MONO_TYPE_CLASS:
+ case MONO_TYPE_I8:
+ s390_lg (p, s390_r2, 0, s390_r10, 0);
+ break;
+ case MONO_TYPE_R4:
+ s390_le (p, s390_f0, 0, s390_r10, 0);
+ break;
+ case MONO_TYPE_R8:
+ s390_ld (p, s390_f0, 0, s390_r10, 0);
+ break;
+ case MONO_TYPE_VALUETYPE:
+ if (sig->ret->data.klass->enumtype) {
+ simpletype = sig->ret->data.klass->enum_basetype->type;
+ goto enum_retvalue;
+ }
+ /*---------------------------------*/
+ /* Call stackval_to_data to return */
+ /* the structure */
+ /*---------------------------------*/
+			s390_bras (p, s390_r13, 14);	/* branch over the 24-byte literal pool */
+ s390_llong(p, sig->ret);
+ s390_llong(p, sig->pinvoke);
+ s390_llong(p, stackval_to_data);
+ s390_lg (p, s390_r2, 0, s390_r13, 0);
+ s390_lg (p, s390_r3, 0, STK_BASE, (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, retval)));
+ if (sz.retStruct) {
+ /*------------------------------------------*/
+ /* Get stackval_to_data to set result area */
+ /*------------------------------------------*/
+ s390_lgr (p, s390_r4, s390_r8);
+ } else {
+ /*------------------------------------------*/
+ /* Give stackval_to_data a temp result area */
+ /*------------------------------------------*/
+ s390_la (p, s390_r4, 0, STK_BASE, stackval_arg_pos);
+ }
+			s390_lg   (p, s390_r5, 0, s390_r13, 8);
+			s390_lg   (p, s390_r1, 0, s390_r13, 16);
+ s390_basr (p, s390_r14, s390_r1);
+ switch (retSize) {
+ case 0:
+ break;
+ case 1:
+ s390_lghi(p, s390_r2, 0);
+ s390_ic (p, s390_r2, 0, s390_r10, 0);
+ break;
+ case 2:
+ s390_lh (p, s390_r2, 0, s390_r10, 0);
+ break;
+ case 4:
+ s390_lgf(p, s390_r2, 0, s390_r10, 0);
+ break;
+ case 8:
+ s390_lg (p, s390_r2, 0, s390_r10, 0);
+ break;
+ default: ;
+ /*-------------------------------------------------*/
+ /* stackval_to_data has placed data in result area */
+ /*-------------------------------------------------*/
+ }
+ break;
+ default:
+ g_error ("Type 0x%x not handled yet in thunk creation",
+ sig->ret->type);
+ break;
+ }
+ }
+
+ /*----------------------------------------------------------*/
+ /* epilog */
+ /*----------------------------------------------------------*/
+ s390_lg (p, STK_BASE, 0, STK_BASE, 0);
+ s390_lg (p, s390_r4, 0, STK_BASE, S390_RET_ADDR_OFFSET);
+ s390_lmg (p, s390_r6, STK_BASE, STK_BASE, S390_REG_SAVE_OFFSET);
+ s390_br (p, s390_r4);
+
+	DEBUG (printf ("emitted code size: %d\n", (int) (p - code_buffer)));
+
+	DEBUG (printf ("Delegate [end emitting]\n"));
+
+ ji = g_new0 (MonoJitInfo, 1);
+ ji->method = method;
+ ji->code_size = p - code_buffer;
+ ji->code_start = code_buffer;
+
+ mono_jit_info_table_add (mono_get_root_domain (), ji);
+
+ return ji->code_start;
+}
+
+/*========================= End of Function ========================*/
diff --git a/lib/ffts/src/arch/sparc/.gitignore b/lib/ffts/src/arch/sparc/.gitignore
new file mode 100644
index 0000000..dc1ebd2
--- /dev/null
+++ b/lib/ffts/src/arch/sparc/.gitignore
@@ -0,0 +1,3 @@
+/Makefile
+/Makefile.in
+/.deps
diff --git a/lib/ffts/src/arch/sparc/Makefile.am b/lib/ffts/src/arch/sparc/Makefile.am
new file mode 100644
index 0000000..a888904
--- /dev/null
+++ b/lib/ffts/src/arch/sparc/Makefile.am
@@ -0,0 +1,7 @@
+
+AM_CPPFLAGS = $(GLIB_CFLAGS) -I$(top_srcdir)
+
+noinst_LTLIBRARIES = libmonoarch-sparc.la
+
+libmonoarch_sparc_la_SOURCES = tramp.c sparc-codegen.h
+
diff --git a/lib/ffts/src/arch/sparc/sparc-codegen.h b/lib/ffts/src/arch/sparc/sparc-codegen.h
new file mode 100644
index 0000000..eb421bb
--- /dev/null
+++ b/lib/ffts/src/arch/sparc/sparc-codegen.h
@@ -0,0 +1,955 @@
+#ifndef __SPARC_CODEGEN_H__
+#define __SPARC_CODEGEN_H__
+
+#if SIZEOF_VOID_P == 8
+#define SPARCV9 1
+#endif
+
+typedef enum {
+ sparc_r0 = 0,
+ sparc_r1 = 1,
+ sparc_r2 = 2,
+ sparc_r3 = 3,
+ sparc_r4 = 4,
+ sparc_r5 = 5,
+ sparc_r6 = 6,
+ sparc_r7 = 7,
+ sparc_r8 = 8,
+ sparc_r9 = 9,
+ sparc_r10 = 10,
+ sparc_r11 = 11,
+ sparc_r12 = 12,
+ sparc_r13 = 13,
+ sparc_r14 = 14,
+ sparc_r15 = 15,
+ sparc_r16 = 16,
+ sparc_r17 = 17,
+ sparc_r18 = 18,
+ sparc_r19 = 19,
+ sparc_r20 = 20,
+ sparc_r21 = 21,
+ sparc_r22 = 22,
+ sparc_r23 = 23,
+ sparc_r24 = 24,
+ sparc_r25 = 25,
+ sparc_r26 = 26,
+ sparc_r27 = 27,
+ sparc_r28 = 28,
+ sparc_r29 = 29,
+ sparc_r30 = 30,
+ sparc_r31 = 31,
+ /* aliases */
+ /* global registers */
+ sparc_g0 = 0, sparc_zero = 0,
+ sparc_g1 = 1,
+ sparc_g2 = 2,
+ sparc_g3 = 3,
+ sparc_g4 = 4,
+ sparc_g5 = 5,
+ sparc_g6 = 6,
+ sparc_g7 = 7,
+ /* out registers */
+ sparc_o0 = 8,
+ sparc_o1 = 9,
+ sparc_o2 = 10,
+ sparc_o3 = 11,
+ sparc_o4 = 12,
+ sparc_o5 = 13,
+ sparc_o6 = 14, sparc_sp = 14,
+ sparc_o7 = 15, sparc_callsite = 15,
+ /* local registers */
+ sparc_l0 = 16,
+ sparc_l1 = 17,
+ sparc_l2 = 18,
+ sparc_l3 = 19,
+ sparc_l4 = 20,
+ sparc_l5 = 21,
+ sparc_l6 = 22,
+ sparc_l7 = 23,
+ /* in registers */
+ sparc_i0 = 24,
+ sparc_i1 = 25,
+ sparc_i2 = 26,
+ sparc_i3 = 27,
+ sparc_i4 = 28,
+ sparc_i5 = 29,
+ sparc_i6 = 30, sparc_fp = 30,
+ sparc_i7 = 31,
+ sparc_nreg = 32,
+ /* floating point registers */
+ sparc_f0 = 0,
+ sparc_f1 = 1,
+ sparc_f2 = 2,
+ sparc_f3 = 3,
+ sparc_f4 = 4,
+ sparc_f5 = 5,
+ sparc_f6 = 6,
+ sparc_f7 = 7,
+ sparc_f8 = 8,
+ sparc_f9 = 9,
+ sparc_f10 = 10,
+ sparc_f11 = 11,
+ sparc_f12 = 12,
+ sparc_f13 = 13,
+ sparc_f14 = 14,
+ sparc_f15 = 15,
+ sparc_f16 = 16,
+ sparc_f17 = 17,
+ sparc_f18 = 18,
+ sparc_f19 = 19,
+ sparc_f20 = 20,
+ sparc_f21 = 21,
+ sparc_f22 = 22,
+ sparc_f23 = 23,
+ sparc_f24 = 24,
+ sparc_f25 = 25,
+ sparc_f26 = 26,
+ sparc_f27 = 27,
+ sparc_f28 = 28,
+ sparc_f29 = 29,
+ sparc_f30 = 30,
+ sparc_f31 = 31,
+} SparcRegister;
+
+typedef enum {
+ sparc_bn = 0, sparc_bnever = 0,
+ sparc_be = 1,
+ sparc_ble = 2,
+ sparc_bl = 3,
+ sparc_bleu = 4,
+ sparc_bcs = 5, sparc_blu = 5,
+ sparc_bneg = 6,
+ sparc_bvs = 7, sparc_boverflow = 7,
+ sparc_ba = 8, sparc_balways = 8,
+ sparc_bne = 9,
+ sparc_bg = 10,
+ sparc_bge = 11,
+ sparc_bgu = 12,
+ sparc_bcc = 13, sparc_beu = 13,
+ sparc_bpos = 14,
+ sparc_bvc = 15
+} SparcCond;
+
+typedef enum {
+ /* with fcmp */
+ sparc_feq = 0,
+ sparc_fl = 1,
+ sparc_fg = 2,
+ sparc_unordered = 3,
+ /* branch ops */
+ sparc_fba = 8,
+ sparc_fbn = 0,
+ sparc_fbu = 7,
+ sparc_fbg = 6,
+ sparc_fbug = 5,
+ sparc_fbl = 4,
+ sparc_fbul = 3,
+ sparc_fblg = 2,
+ sparc_fbne = 1,
+ sparc_fbe = 9,
+ sparc_fbue = 10,
+ sparc_fbge = 11,
+ sparc_fbuge = 12,
+ sparc_fble = 13,
+ sparc_fbule = 14,
+ sparc_fbo = 15
+} SparcFCond;
+
+typedef enum {
+ sparc_icc = 4,
+ sparc_xcc = 6,
+ sparc_fcc0 = 0,
+ sparc_fcc1 = 1,
+ sparc_fcc2 = 2,
+ sparc_fcc3 = 3
+} SparcCC;
+
+typedef enum {
+ sparc_icc_short = 0,
+ sparc_xcc_short = 2
+} SparcCCShort;
+
+typedef enum {
+ /* fop1 format */
+ sparc_fitos_val = 196,
+ sparc_fitod_val = 200,
+ sparc_fitoq_val = 204,
+ sparc_fxtos_val = 132,
+ sparc_fxtod_val = 136,
+ sparc_fxtoq_val = 140,
+ sparc_fstoi_val = 209,
+ sparc_fdtoi_val = 210,
+ sparc_fqtoi_val = 211,
+ sparc_fstod_val = 201,
+ sparc_fstoq_val = 205,
+ sparc_fdtos_val = 198,
+ sparc_fdtoq_val = 206,
+ sparc_fqtos_val = 199,
+ sparc_fqtod_val = 203,
+ sparc_fmovs_val = 1,
+ sparc_fmovd_val = 2,
+ sparc_fnegs_val = 5,
+ sparc_fnegd_val = 6,
+ sparc_fabss_val = 9,
+ sparc_fabsd_val = 10,
+ sparc_fsqrts_val = 41,
+ sparc_fsqrtd_val = 42,
+ sparc_fsqrtq_val = 43,
+ sparc_fadds_val = 65,
+ sparc_faddd_val = 66,
+ sparc_faddq_val = 67,
+ sparc_fsubs_val = 69,
+ sparc_fsubd_val = 70,
+ sparc_fsubq_val = 71,
+ sparc_fmuls_val = 73,
+ sparc_fmuld_val = 74,
+ sparc_fmulq_val = 75,
+ sparc_fsmuld_val = 105,
+ sparc_fdmulq_val = 111,
+ sparc_fdivs_val = 77,
+ sparc_fdivd_val = 78,
+ sparc_fdivq_val = 79,
+ /* fop2 format */
+ sparc_fcmps_val = 81,
+ sparc_fcmpd_val = 82,
+ sparc_fcmpq_val = 83,
+ sparc_fcmpes_val = 85,
+ sparc_fcmped_val = 86,
+ sparc_fcmpeq_val = 87
+} SparcFOp;
+
+typedef enum {
+ sparc_membar_load_load = 0x1,
+ sparc_membar_store_load = 0x2,
+ sparc_membar_load_store = 0x4,
+ sparc_membar_store_store = 0x8,
+
+ sparc_membar_lookaside = 0x10,
+ sparc_membar_memissue = 0x20,
+ sparc_membar_sync = 0x40,
+
+ sparc_membar_all = 0x4f
+} SparcMembarFlags;
+
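+/* instruction formats as bit-fields; note that this layout assumes
+ * the compiler allocates bit-fields MSB-first, as GCC does on
+ * big-endian SPARC -- it is not portable to little-endian hosts
+ */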
+typedef struct {
+ unsigned int op : 2; /* always 1 */
+ unsigned int disp : 30;
+} sparc_format1;
+
+typedef struct {
+ unsigned int op : 2; /* always 0 */
+ unsigned int rd : 5;
+ unsigned int op2 : 3;
+ unsigned int disp : 22;
+} sparc_format2a;
+
+typedef struct {
+ unsigned int op : 2; /* always 0 */
+ unsigned int a : 1;
+ unsigned int cond : 4;
+ unsigned int op2 : 3;
+ unsigned int disp : 22;
+} sparc_format2b;
+
+typedef struct {
+ unsigned int op : 2; /* always 0 */
+ unsigned int a : 1;
+ unsigned int cond : 4;
+ unsigned int op2 : 3;
+ unsigned int cc01 : 2;
+ unsigned int p : 1;
+ unsigned int d19 : 19;
+} sparc_format2c;
+
+typedef struct {
+ unsigned int op : 2; /* always 0 */
+ unsigned int a : 1;
+ unsigned int res : 1;
+ unsigned int rcond: 3;
+ unsigned int op2 : 3;
+ unsigned int d16hi: 2;
+ unsigned int p : 1;
+ unsigned int rs1 : 5;
+ unsigned int d16lo: 14;
+} sparc_format2d;
+
+typedef struct {
+ unsigned int op : 2; /* 2 or 3 */
+ unsigned int rd : 5;
+ unsigned int op3 : 6;
+ unsigned int rs1 : 5;
+ unsigned int i : 1;
+ unsigned int asi : 8;
+ unsigned int rs2 : 5;
+} sparc_format3a;
+
+typedef struct {
+ unsigned int op : 2; /* 2 or 3 */
+ unsigned int rd : 5;
+ unsigned int op3 : 6;
+ unsigned int rs1 : 5;
+ unsigned int i : 1;
+ unsigned int x : 1;
+ unsigned int asi : 7;
+ unsigned int rs2 : 5;
+} sparc_format3ax;
+
+typedef struct {
+ unsigned int op : 2; /* 2 or 3 */
+ unsigned int rd : 5;
+ unsigned int op3 : 6;
+ unsigned int rs1 : 5;
+ unsigned int i : 1;
+ unsigned int imm : 13;
+} sparc_format3b;
+
+typedef struct {
+ unsigned int op : 2; /* 2 or 3 */
+ unsigned int rd : 5;
+ unsigned int op3 : 6;
+ unsigned int rs1 : 5;
+ unsigned int i : 1;
+ unsigned int x : 1;
+ unsigned int imm : 12;
+} sparc_format3bx;
+
+typedef struct {
+ unsigned int op : 2; /* 2 or 3 */
+ unsigned int rd : 5;
+ unsigned int op3 : 6;
+ unsigned int rs1 : 5;
+ unsigned int opf : 9;
+ unsigned int rs2 : 5;
+} sparc_format3c;
+
+typedef struct {
+ unsigned int op : 2;
+ unsigned int rd : 5;
+ unsigned int op3 : 6;
+ unsigned int rs1 : 5;
+ unsigned int i : 1;
+ unsigned int cc01 : 2;
+ unsigned int res : 6;
+ unsigned int rs2 : 5;
+} sparc_format4a;
+
+typedef struct {
+ unsigned int op : 2;
+ unsigned int rd : 5;
+ unsigned int op3 : 6;
+ unsigned int rs1 : 5;
+ unsigned int i : 1;
+ unsigned int cc01 : 2;
+ unsigned int simm : 11;
+} sparc_format4b;
+
+typedef struct {
+ unsigned int op : 2;
+ unsigned int rd : 5;
+ unsigned int op3 : 6;
+ unsigned int cc2 : 1;
+ unsigned int cond : 4;
+ unsigned int i : 1;
+ unsigned int cc01 : 2;
+ unsigned int res : 6;
+ unsigned int rs2 : 5;
+} sparc_format4c;
+
+typedef struct {
+ unsigned int op : 2;
+ unsigned int rd : 5;
+ unsigned int op3 : 6;
+ unsigned int cc2 : 1;
+ unsigned int cond : 4;
+ unsigned int i : 1;
+ unsigned int cc01 : 2;
+ unsigned int simm : 11;
+} sparc_format4d;
+
+/* for use in logical ops, use 0 to not set flags */
+#define sparc_cc 16
+
+#define sparc_is_imm13(val) ((glong)val >= (glong)-(1<<12) && (glong)val <= (glong)((1<<12)-1))
+#define sparc_is_imm22(val) ((glong)val >= (glong)-(1<<21) && (glong)val <= (glong)((1<<21)-1))
+#define sparc_is_imm16(val) ((glong)val >= (glong)-(1<<15) && (glong)val <= (glong)((1<<15)-1))
+#define sparc_is_imm19(val) ((glong)val >= (glong)-(1<<18) && (glong)val <= (glong)((1<<18)-1))
+#define sparc_is_imm30(val) ((glong)val >= (glong)-(1<<29) && (glong)val <= (glong)((1<<29)-1))
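+/* e.g. sparc_is_imm13 (4095) is true, sparc_is_imm13 (4096) is not */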
+
+/* disassembly */
+#define sparc_inst_op(inst) ((inst) >> 30)
+#define sparc_inst_op2(inst) (((inst) >> 22) & 0x7)
+#define sparc_inst_rd(inst) (((inst) >> 25) & 0x1f)
+#define sparc_inst_op3(inst) (((inst) >> 19) & 0x3f)
+#define sparc_inst_i(inst) (((inst) >> 13) & 0x1)
+#define sparc_inst_rs1(inst) (((inst) >> 14) & 0x1f)
+#define sparc_inst_rs2(inst) (((inst) >> 0) & 0x1f)
+#define sparc_inst_imm(inst) (((inst) >> 13) & 0x1)
+#define sparc_inst_imm13(inst) (((inst) >> 0) & 0x1fff)
+
+#define sparc_encode_call(ins,addr) \
+ do { \
+ sparc_format1 *__f = (sparc_format1*)(ins); \
+ __f->op = 1; \
+ __f->disp = ((unsigned int)(addr) >> 2); \
+ (ins) = (unsigned int*)__f + 1; \
+ } while (0)
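+/* note: disp holds a word (instruction) displacement, hence the >> 2;
+ * computing the PC-relative target is the caller's responsibility
+ */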
+
+#define sparc_encode_format2a(ins,val,oper,dest) \
+ do { \
+ sparc_format2a *__f = (sparc_format2a*)(ins); \
+ __f->op = 0; \
+ __f->rd = (dest); \
+ __f->op2 = (oper); \
+ __f->disp = (val) & 0x3fffff; \
+ (ins) = (unsigned int*)__f + 1; \
+ } while (0)
+
+#define sparc_encode_format2b(ins,aval,bcond,oper,disp22) \
+ do { \
+ sparc_format2b *__f = (sparc_format2b*)(ins); \
+ __f->op = 0; \
+ __f->a = (aval); \
+ __f->cond = (bcond); \
+ __f->op2 = (oper); \
+ __f->disp = (disp22); \
+ (ins) = (unsigned int*)__f + 1; \
+ } while (0)
+
+#define sparc_encode_format2c(ins,aval,bcond,oper,xcc,predict,disp19) \
+ do { \
+ sparc_format2c *__f = (sparc_format2c*)(ins); \
+ __f->op = 0; \
+ __f->a = (aval); \
+ __f->cond = (bcond); \
+ __f->op2 = (oper); \
+ __f->cc01 = (xcc); \
+ __f->p = (predict); \
+ __f->d19 = (disp19); \
+ (ins) = (unsigned int*)__f + 1; \
+ } while (0)
+
+#define sparc_encode_format2d(ins,aval,bcond,oper,predict,r1,disp16) \
+ do { \
+ sparc_format2d *__f = (sparc_format2d*)(ins); \
+ __f->op = 0; \
+ __f->a = (aval); \
+ __f->res = 0; \
+ __f->rcond = (bcond); \
+ __f->op2 = (oper); \
+ __f->d16hi = ((disp16) >> 14); \
+ __f->p = (predict); \
+ __f->rs1 = (r1); \
+ __f->d16lo = ((disp16) & 0x3fff); \
+ (ins) = (unsigned int*)__f + 1; \
+ } while (0)
+
+#define sparc_encode_format3a(ins,opval,asival,r1,r2,oper,dest) \
+ do { \
+ sparc_format3a *__f = (sparc_format3a*)(ins); \
+ __f->op = (opval); \
+ __f->asi = (asival); \
+ __f->i = 0; \
+ __f->rd = (dest); \
+ __f->rs1 = (r1); \
+ __f->rs2 = (r2); \
+ __f->op3 = (oper); \
+ (ins) = (unsigned int*)__f + 1; \
+ } while (0)
+
+#define sparc_encode_format3ax(ins,opval,asival,r1,r2,oper,dest) \
+ do { \
+ sparc_format3ax *__f = (sparc_format3ax*)(ins); \
+ __f->op = (opval); \
+ __f->asi = (asival); \
+ __f->i = 0; \
+ __f->x = 1; \
+ __f->rd = (dest); \
+ __f->rs1 = (r1); \
+ __f->rs2 = (r2); \
+ __f->op3 = (oper); \
+ (ins) = (unsigned int*)__f + 1; \
+ } while (0)
+
+#define sparc_encode_format3b(ins,opval,r1,val,oper,dest) \
+ do { \
+ sparc_format3b *__f = (sparc_format3b*)(ins); \
+ __f->op = (opval); \
+ __f->imm = (val); \
+ __f->i = 1; \
+ __f->rd = (dest); \
+ __f->rs1 = (r1); \
+ __f->op3 = (oper); \
+ (ins) = (unsigned int*)__f + 1; \
+ } while (0)
+
+#define sparc_encode_format3bx(ins,opval,r1,val,oper,dest) \
+ do { \
+ sparc_format3bx *__f = (sparc_format3bx*)(ins); \
+ __f->op = (opval); \
+ __f->imm = (val); \
+ __f->i = 1; \
+ __f->x = 1; \
+ __f->rd = (dest); \
+ __f->rs1 = (r1); \
+ __f->op3 = (oper); \
+ (ins) = (unsigned int*)__f + 1; \
+ } while (0)
+
+#define sparc_encode_format3c(ins,opval,opfval,r1,oper,r2,dest) \
+ do { \
+ sparc_format3c *__f = (sparc_format3c*)(ins); \
+ __f->op = (opval); \
+ __f->opf = (opfval); \
+ __f->rd = (dest); \
+ __f->rs1 = (r1); \
+ __f->rs2 = (r2); \
+ __f->op3 = (oper); \
+ (ins) = (unsigned int*)__f + 1; \
+ } while (0)
+
+#define sparc_encode_format4a(ins,opval,oper,cc,r1,r2,dest) \
+ do { \
+ sparc_format4a *__f = (sparc_format4a*)(ins); \
+ __f->op = (opval); \
+ __f->rd = (dest); \
+ __f->op3 = (oper); \
+ __f->rs1 = (r1); \
+ __f->i = 0; \
+ __f->cc01= (cc) & 0x3; \
+ __f->res = 0; \
+ __f->rs2 = (r2); \
+ (ins) = (unsigned int*)__f + 1; \
+ } while (0)
+
+#define sparc_encode_format4b(ins,opval,oper,cc,r1,imm,dest) \
+ do { \
+ sparc_format4b *__f = (sparc_format4b*)(ins); \
+ __f->op = (opval); \
+ __f->rd = (dest); \
+ __f->op3 = (oper); \
+ __f->rs1 = (r1); \
+ __f->i = 1; \
+ __f->cc01= (cc) & 0x3; \
+ __f->simm = (imm); \
+ (ins) = (unsigned int*)__f + 1; \
+ } while (0)
+
+#define sparc_encode_format4c(ins,opval,oper,xcc,bcond,r2,dest) \
+ do { \
+ sparc_format4c *__f = (sparc_format4c*)(ins); \
+ __f->op = (opval); \
+ __f->rd = (dest); \
+ __f->op3 = (oper); \
+ __f->cc2 = ((xcc) >> 2) & 0x1; \
+ __f->cond = bcond; \
+ __f->i = 0; \
+ __f->cc01= (xcc) & 0x3; \
+ __f->res = 0; \
+ __f->rs2 = (r2); \
+ (ins) = (unsigned int*)__f + 1; \
+ } while (0)
+
+#define sparc_encode_format4d(ins,opval,oper,xcc,bcond,imm,dest) \
+ do { \
+ sparc_format4d *__f = (sparc_format4d*)(ins); \
+ __f->op = (opval); \
+ __f->rd = (dest); \
+ __f->op3 = (oper); \
+ __f->cc2 = ((xcc) >> 2) & 0x1; \
+ __f->cond = bcond; \
+ __f->i = 1; \
+ __f->cc01= (xcc) & 0x3; \
+ __f->simm = (imm); \
+ (ins) = (unsigned int*)__f + 1; \
+ } while (0)
+
+/* is it useful to provide a non-default value? */
+#define sparc_asi 0x0
+
+/* load */
+#define sparc_ldsb(ins,base,disp,dest) sparc_encode_format3a((ins),3,sparc_asi,(base),(disp),9,(dest))
+#define sparc_ldsb_imm(ins,base,disp,dest) sparc_encode_format3b((ins),3,(base),(disp),9,(dest))
+
+#define sparc_ldsh(ins,base,disp,dest) sparc_encode_format3a((ins),3,sparc_asi,(base),(disp),10,(dest))
+#define sparc_ldsh_imm(ins,base,disp,dest) sparc_encode_format3b((ins),3,(base),(disp),10,(dest))
+
+#define sparc_ldub(ins,base,disp,dest) sparc_encode_format3a((ins),3,sparc_asi,(base),(disp),1,(dest))
+#define sparc_ldub_imm(ins,base,disp,dest) sparc_encode_format3b((ins),3,(base),(disp),1,(dest))
+
+#define sparc_lduh(ins,base,disp,dest) sparc_encode_format3a((ins),3,sparc_asi,(base),(disp),2,(dest))
+#define sparc_lduh_imm(ins,base,disp,dest) sparc_encode_format3b((ins),3,(base),(disp),2,(dest))
+
+#define sparc_ld(ins,base,disp,dest) sparc_encode_format3a((ins),3,sparc_asi,(base),(disp),0,(dest))
+#define sparc_ld_imm(ins,base,disp,dest) sparc_encode_format3b((ins),3,(base),(disp),0,(dest))
+
+/* Sparc V9 */
+#define sparc_ldx(ins,base,disp,dest) sparc_encode_format3a((ins),3,sparc_asi,(base),(disp),11,(dest))
+#define sparc_ldx_imm(ins,base,disp,dest) sparc_encode_format3b((ins),3,(base),(disp),11,(dest))
+
+#define sparc_ldsw(ins,base,disp,dest) sparc_encode_format3a((ins),3,sparc_asi,(base),(disp),8,(dest))
+#define sparc_ldsw_imm(ins,base,disp,dest) sparc_encode_format3b((ins),3,(base),(disp),8,(dest))
+
+#define sparc_ldd(ins,base,disp,dest) sparc_encode_format3a((ins),3,sparc_asi,(base),(disp),3,(dest))
+#define sparc_ldd_imm(ins,base,disp,dest) sparc_encode_format3b((ins),3,(base),(disp),3,(dest))
+
+#define sparc_ldf(ins,base,disp,dest) sparc_encode_format3a((ins),3,0,(base),(disp),32,(dest))
+#define sparc_ldf_imm(ins,base,disp,dest) sparc_encode_format3b((ins),3,(base),(disp),32,(dest))
+
+#define sparc_lddf(ins,base,disp,dest) sparc_encode_format3a((ins),3,0,(base),(disp),35,(dest))
+#define sparc_lddf_imm(ins,base,disp,dest) sparc_encode_format3b((ins),3,(base),(disp),35,(dest))
+
+/* store */
+#define sparc_stb(ins,src,base,disp) sparc_encode_format3a((ins),3,sparc_asi,(base),(disp),5,(src))
+#define sparc_stb_imm(ins,src,base,disp) sparc_encode_format3b((ins),3,(base),(disp),5,(src))
+
+#define sparc_sth(ins,src,base,disp) sparc_encode_format3a((ins),3,sparc_asi,(base),(disp),6,(src))
+#define sparc_sth_imm(ins,src,base,disp) sparc_encode_format3b((ins),3,(base),(disp),6,(src))
+
+#define sparc_st(ins,src,base,disp) sparc_encode_format3a((ins),3,sparc_asi,(base),(disp),4,(src))
+#define sparc_st_imm(ins,src,base,disp) sparc_encode_format3b((ins),3,(base),(disp),4,(src))
+
+/* Sparc V9 */
+#define sparc_stx(ins,src,base,disp) sparc_encode_format3a((ins),3,sparc_asi,(base),(disp),14,(src))
+#define sparc_stx_imm(ins,src,base,disp) sparc_encode_format3b((ins),3,(base),(disp),14,(src))
+
+#define sparc_std(ins,src,base,disp) sparc_encode_format3a((ins),3,sparc_asi,(base),(disp),7,(src))
+#define sparc_std_imm(ins,src,base,disp) sparc_encode_format3b((ins),3,(base),(disp),7,(src))
+
+#define sparc_stf(ins,src,base,disp) sparc_encode_format3a((ins),3,sparc_asi,(base),(disp),36,(src))
+#define sparc_stf_imm(ins,src,base,disp) sparc_encode_format3b((ins),3,(base),(disp),36,(src))
+
+#define sparc_stdf(ins,src,base,disp) sparc_encode_format3a((ins),3,sparc_asi,(base),(disp),39,(src))
+#define sparc_stdf_imm(ins,src,base,disp) sparc_encode_format3b((ins),3,(base),(disp),39,(src))
+
+/* swap */
+#define sparc_ldstub(ins,base,disp,dest) sparc_encode_format3a((ins),3,sparc_asi,(base),(disp),13,(dest))
+#define sparc_ldstub_imm(ins,base,disp,dest) sparc_encode_format3b((ins),3,(base),(disp),13,(dest))
+
+#define sparc_swap(ins,base,disp,dest) sparc_encode_format3a((ins),3,sparc_asi,(base),(disp),15,(dest))
+#define sparc_swap_imm(ins,base,disp,dest) sparc_encode_format3b((ins),3,(base),(disp),15,(dest))
+
+/* misc */
+/* note: with sethi val is the full 32 bit value (think of it as %hi(val)) */
+#define sparc_sethi(ins,val,dest) sparc_encode_format2a((ins),((val)>>10),4,(dest))
+
+#define sparc_nop(ins) sparc_sethi((ins),0,sparc_zero)
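+/* a nop is encoded as "sethi 0, %g0": writes to %g0 are discarded */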
+
+#define sparc_save(ins,src,disp,dest) sparc_encode_format3a((ins),2,0,(src),(disp),60,(dest))
+#define sparc_save_imm(ins,src,disp,dest) sparc_encode_format3b((ins),2,(src),(disp),60,(dest))
+
+#define sparc_restore(ins,src,disp,dest) sparc_encode_format3a((ins),2,0,(src),(disp),61,(dest))
+#define sparc_restore_imm(ins,src,disp,dest) sparc_encode_format3b((ins),2,(src),(disp),61,(dest))
+
+#define sparc_rett(ins,src,disp) sparc_encode_format3a((ins),2,0,(src),(disp),0x39,0)
+#define sparc_rett_imm(ins,src,disp) sparc_encode_format3b((ins),2,(src),(disp),0x39,0)
+
+#define sparc_jmpl(ins,base,disp,dest) sparc_encode_format3a((ins),2,0,(base),(disp),56,(dest))
+#define sparc_jmpl_imm(ins,base,disp,dest) sparc_encode_format3b((ins),2,(base),(disp),56,(dest))
+
+#define sparc_call_simple(ins,disp) sparc_encode_call((ins),((unsigned int)(disp)))
+
+#define sparc_rdy(ins,dest) sparc_encode_format3a((ins),2,0,0,0,40,(dest))
+
+#define sparc_wry(ins,base,disp) sparc_encode_format3a((ins),2,0,(base),(disp),48,0)
+#define sparc_wry_imm(ins,base,disp) sparc_encode_format3b((ins),2,(base),(disp),48,0)
+
+/* stbar, unimp, flush */
+#define sparc_stbar(ins) sparc_encode_format3a((ins),2,0,15,0,40,0)
+#define sparc_unimp(ins,val) sparc_encode_format2b((ins),0,0,0,(val))
+
+#define sparc_flush(ins,base,disp) sparc_encode_format3a((ins),2,0,(base),(disp),59,0)
+#define sparc_flush_imm(ins,base,disp) sparc_encode_format3b((ins),2,(base),(disp),59,0)
+
+#define sparc_flushw(ins) sparc_encode_format3a((ins),2,0,0,0,43,0)
+
+#define sparc_membar(ins,flags) sparc_encode_format3b ((ins), 2, 0xf, (flags), 0x28, 0)
+
+/* trap */
+
+#define sparc_ta(ins,tt) sparc_encode_format3b((ins),2,0,(tt),58,0x8)
+
+/* alu fop */
+/* provide wrappers for: fitos, fitod, fstoi, fdtoi, fstod, fdtos, fmov, fneg, fabs */
+
+#define sparc_fop(ins,r1,op,r2,dest) sparc_encode_format3c((ins),2,(op),(r1),52,(r2),(dest))
+#define sparc_fcmp(ins,r1,op,r2) sparc_encode_format3c((ins),2,(op),(r1),53,(r2),0)
+
+/* format 1 fops */
+#define sparc_fadds(ins, r1, r2, dest) sparc_fop( ins, r1, sparc_fadds_val, r2, dest )
+#define sparc_faddd(ins, r1, r2, dest) sparc_fop( ins, r1, sparc_faddd_val, r2, dest )
+#define sparc_faddq(ins, r1, r2, dest) sparc_fop( ins, r1, sparc_faddq_val, r2, dest )
+
+#define sparc_fsubs(ins, r1, r2, dest) sparc_fop( ins, r1, sparc_fsubs_val, r2, dest )
+#define sparc_fsubd(ins, r1, r2, dest) sparc_fop( ins, r1, sparc_fsubd_val, r2, dest )
+#define sparc_fsubq(ins, r1, r2, dest) sparc_fop( ins, r1, sparc_fsubq_val, r2, dest )
+
+#define sparc_fmuls( ins, r1, r2, dest ) sparc_fop( ins, r1, sparc_fmuls_val, r2, dest )
+#define sparc_fmuld( ins, r1, r2, dest ) sparc_fop( ins, r1, sparc_fmuld_val, r2, dest )
+#define sparc_fmulq( ins, r1, r2, dest ) sparc_fop( ins, r1, sparc_fmulq_val, r2, dest )
+
+#define sparc_fsmuld( ins, r1, r2, dest ) sparc_fop( ins, r1, sparc_fsmuld_val, r2, dest )
+#define sparc_fdmulq( ins, r1, r2, dest ) sparc_fop( ins, r1, sparc_fdmulq_val, r2, dest )
+
+#define sparc_fdivs( ins, r1, r2, dest ) sparc_fop( ins, r1, sparc_fdivs_val, r2, dest )
+#define sparc_fdivd( ins, r1, r2, dest ) sparc_fop( ins, r1, sparc_fdivd_val, r2, dest )
+#define sparc_fdivq( ins, r1, r2, dest ) sparc_fop( ins, r1, sparc_fdivq_val, r2, dest )
+
+#define sparc_fitos( ins, r2, dest ) sparc_fop( ins, 0, sparc_fitos_val, r2, dest )
+#define sparc_fitod( ins, r2, dest ) sparc_fop( ins, 0, sparc_fitod_val, r2, dest )
+#define sparc_fitoq( ins, r2, dest ) sparc_fop( ins, 0, sparc_fitoq_val, r2, dest )
+
+#define sparc_fxtos( ins, r2, dest) sparc_fop( ins, 0, sparc_fxtos_val, r2, dest )
+#define sparc_fxtod( ins, r2, dest) sparc_fop( ins, 0, sparc_fxtod_val, r2, dest )
+#define sparc_fxtoq( ins, r2, dest) sparc_fop( ins, 0, sparc_fxtoq_val, r2, dest )
+
+#define sparc_fstoi( ins, r2, dest ) sparc_fop( ins, 0, sparc_fstoi_val, r2, dest )
+#define sparc_fdtoi( ins, r2, dest ) sparc_fop( ins, 0, sparc_fdtoi_val, r2, dest )
+#define sparc_fqtoi( ins, r2, dest ) sparc_fop( ins, 0, sparc_fqtoi_val, r2, dest )
+
+#define sparc_fstod( ins, r2, dest ) sparc_fop( ins, 0, sparc_fstod_val, r2, dest )
+#define sparc_fstoq( ins, r2, dest ) sparc_fop( ins, 0, sparc_fstoq_val, r2, dest )
+
+#define sparc_fdtos( ins, r2, dest ) sparc_fop( ins, 0, sparc_fdtos_val, r2, dest )
+#define sparc_fdtoq( ins, r2, dest ) sparc_fop( ins, 0, sparc_fdtoq_val, r2, dest )
+
+#define sparc_fqtos( ins, r2, dest ) sparc_fop( ins, 0, sparc_fqtos_val, r2, dest )
+#define sparc_fqtod( ins, r2, dest ) sparc_fop( ins, 0, sparc_fqtod_val, r2, dest )
+
+#define sparc_fmovs( ins, r2, dest ) sparc_fop( ins, 0, sparc_fmovs_val, r2, dest )
+#define sparc_fnegs( ins, r2, dest ) sparc_fop( ins, 0, sparc_fnegs_val, r2, dest )
+#define sparc_fabss( ins, r2, dest ) sparc_fop( ins, 0, sparc_fabss_val, r2, dest )
+
+#define sparc_fmovd( ins, r2, dest ) sparc_fop( ins, 0, sparc_fmovd_val, r2, dest )
+#define sparc_fnegd( ins, r2, dest ) sparc_fop( ins, 0, sparc_fnegd_val, r2, dest )
+#define sparc_fabsd( ins, r2, dest ) sparc_fop( ins, 0, sparc_fabsd_val, r2, dest )
+
+#define sparc_fsqrts( ins, r2, dest ) sparc_fop( ins, 0, sparc_fsqrts_val, r2, dest )
+#define sparc_fsqrtd( ins, r2, dest ) sparc_fop( ins, 0, sparc_fsqrtd_val, r2, dest )
+#define sparc_fsqrtq( ins, r2, dest ) sparc_fop( ins, 0, sparc_fsqrtq_val, r2, dest )
+
+/* format 2 fops */
+
+#define sparc_fcmps( ins, r1, r2 ) sparc_fcmp( ins, r1, sparc_fcmps_val, r2 )
+#define sparc_fcmpd( ins, r1, r2 ) sparc_fcmp( ins, r1, sparc_fcmpd_val, r2 )
+#define sparc_fcmpq( ins, r1, r2 ) sparc_fcmp( ins, r1, sparc_fcmpq_val, r2 )
+#define sparc_fcmpes( ins, r1, r2 ) sparc_fcmp( ins, r1, sparc_fcmpes_val, r2 )
+#define sparc_fcmped( ins, r1, r2 ) sparc_fcmp( ins, r1, sparc_fcmped_val, r2 )
+#define sparc_fcmpeq( ins, r1, r2 ) sparc_fcmp( ins, r1, sparc_fcmpeq_val, r2 )
+
+/* logical */
+
+/* FIXME: condense this using macros */
+/* FIXME: the setcc stuff is wrong in lots of places */
+
+#define sparc_logic(ins,op,setcc,r1,r2,dest) sparc_encode_format3a((ins),2,0,(r1),(r2),((setcc) ? 0x10 : 0) | (op), (dest))
+#define sparc_logic_imm(ins,op,setcc,r1,imm,dest) sparc_encode_format3b((ins),2,(r1),(imm),((setcc) ? 0x10 : 0) | (op), (dest))
+
+#define sparc_and(ins,setcc,r1,r2,dest) sparc_logic(ins,1,setcc,r1,r2,dest)
+#define sparc_and_imm(ins,setcc,r1,imm,dest) sparc_logic_imm(ins,1,setcc,r1,imm,dest)
+
+#define sparc_andn(ins,setcc,r1,r2,dest) sparc_encode_format3a((ins),2,0,(r1),(r2),(setcc)|5,(dest))
+#define sparc_andn_imm(ins,setcc,r1,imm,dest) sparc_encode_format3b((ins),2,(r1),(imm),(setcc)|5,(dest))
+
+#define sparc_or(ins,setcc,r1,r2,dest) sparc_encode_format3a((ins),2,0,(r1),(r2),(setcc)|2,(dest))
+#define sparc_or_imm(ins,setcc,r1,imm,dest) sparc_encode_format3b((ins),2,(r1),(imm),(setcc)|2,(dest))
+
+#define sparc_orn(ins,setcc,r1,r2,dest) sparc_encode_format3a((ins),2,0,(r1),(r2),(setcc)|6,(dest))
+#define sparc_orn_imm(ins,setcc,r1,imm,dest) sparc_encode_format3b((ins),2,(r1),(imm),(setcc)|6,(dest))
+
+#define sparc_xor(ins,setcc,r1,r2,dest) sparc_encode_format3a((ins),2,0,(r1),(r2),(setcc)|3,(dest))
+#define sparc_xor_imm(ins,setcc,r1,imm,dest) sparc_encode_format3b((ins),2,(r1),(imm), (setcc)|3,(dest))
+
+#define sparc_xnor(ins,setcc,r1,r2,dest) sparc_encode_format3a((ins),2,0,(r1),(r2),(setcc)|7,(dest))
+#define sparc_xnor_imm(ins,setcc,r1,imm,dest) sparc_encode_format3b((ins),2,(r1),(imm),(setcc)|7,(dest))
+
+/* shift */
+#define sparc_sll(ins,src,disp,dest) sparc_encode_format3a((ins),2,0,(src),(disp),37,(dest))
+#define sparc_sll_imm(ins,src,disp,dest) sparc_encode_format3b((ins),2,(src),(disp),37,(dest))
+
+/* Sparc V9 */
+#define sparc_sllx(ins,src,disp,dest) sparc_encode_format3ax((ins),2,0,(src),(disp),37,(dest))
+#define sparc_sllx_imm(ins,src,disp,dest) sparc_encode_format3bx((ins),2,(src),(disp),37,(dest))
+
+#define sparc_srl(ins,src,disp,dest) sparc_encode_format3a((ins),2,0,(src),(disp),38,(dest))
+#define sparc_srl_imm(ins,src,disp,dest) sparc_encode_format3b((ins),2,(src),(disp),38,(dest))
+
+/* Sparc V9 */
+#define sparc_srlx(ins,src,disp,dest) sparc_encode_format3ax((ins),2,0,(src),(disp),38,(dest))
+#define sparc_srlx_imm(ins,src,disp,dest) sparc_encode_format3bx((ins),2,(src),(disp),38,(dest))
+
+#define sparc_sra(ins,src,disp,dest) sparc_encode_format3a((ins),2,0,(src),(disp),39,(dest))
+#define sparc_sra_imm(ins,src,disp,dest) sparc_encode_format3b((ins),2,(src),(disp),39,(dest))
+
+/* Sparc V9 */
+#define sparc_srax(ins,src,disp,dest) sparc_encode_format3ax((ins),2,0,(src),(disp),39,(dest))
+#define sparc_srax_imm(ins,src,disp,dest) sparc_encode_format3bx((ins),2,(src),(disp),39,(dest))
+
+/* alu */
+
+#define sparc_alu_reg(ins,op,setcc,r1,r2,dest) sparc_encode_format3a((ins),2,0,(r1),(r2),op|((setcc) ? 0x10 : 0),(dest))
+#define sparc_alu_imm(ins,op,setcc,r1,imm,dest) sparc_encode_format3b((ins),2,(r1),(imm),op|((setcc) ? 0x10 : 0),(dest))
+
+#define sparc_add(ins,setcc,r1,r2,dest) sparc_alu_reg((ins),0,(setcc),(r1),(r2),(dest))
+#define sparc_add_imm(ins,setcc,r1,imm,dest) sparc_alu_imm((ins),0,(setcc),(r1),(imm),(dest))
+
+#define sparc_addx(ins,setcc,r1,r2,dest) sparc_alu_reg((ins),0x8,(setcc),(r1),(r2),(dest))
+#define sparc_addx_imm(ins,setcc,r1,imm,dest) sparc_alu_imm((ins),0x8,(setcc),(r1),(imm),(dest))
+
+#define sparc_sub(ins,setcc,r1,r2,dest) sparc_alu_reg((ins),0x4,(setcc),(r1),(r2),(dest))
+#define sparc_sub_imm(ins,setcc,r1,imm,dest) sparc_alu_imm((ins),0x4,(setcc),(r1),(imm),(dest))
+
+#define sparc_subx(ins,setcc,r1,r2,dest) sparc_alu_reg((ins),0xc,(setcc),(r1),(r2),(dest))
+#define sparc_subx_imm(ins,setcc,r1,imm,dest) sparc_alu_imm((ins),0xc,(setcc),(r1),(imm),(dest))
+
+#define sparc_muls(ins,r1,r2,dest) sparc_encode_format3a((ins),2,0,(r1),(r2),36,(dest))
+#define sparc_muls_imm(ins,r1,imm,dest) sparc_encode_format3b((ins),2,(r1),(imm),36,(dest))
+
+#define sparc_umul(ins,setcc,r1,r2,dest) sparc_alu_reg((ins),0xa,(setcc),(r1),(r2),(dest))
+#define sparc_umul_imm(ins,setcc,r1,imm,dest) sparc_alu_imm((ins),0xa,(setcc),(r1),(imm),(dest))
+
+#define sparc_smul(ins,setcc,r1,r2,dest) sparc_alu_reg((ins),0xb,(setcc),(r1),(r2),(dest))
+#define sparc_smul_imm(ins,setcc,r1,imm,dest) sparc_alu_imm((ins),0xb,(setcc),(r1),(imm),(dest))
+
+#define sparc_udiv(ins,setcc,r1,r2,dest) sparc_alu_reg((ins),0xe,(setcc),(r1),(r2),(dest))
+#define sparc_udiv_imm(ins,setcc,r1,imm,dest) sparc_alu_imm((ins),0xe,(setcc),(r1),(imm),(dest))
+
+#define sparc_sdiv(ins,setcc,r1,r2,dest) sparc_alu_reg((ins),0xf,(setcc),(r1),(r2),(dest))
+#define sparc_sdiv_imm(ins,setcc,r1,imm,dest) sparc_alu_imm((ins),0xf,(setcc),(r1),(imm),(dest))
+
+
+/* branch */
+#define sparc_branch(ins,aval,condval,displ) sparc_encode_format2b((ins),(aval),(condval),2,(displ))
+/* FIXME: float condition codes are different: unify. */
+#define sparc_fbranch(ins,aval,condval,displ) sparc_encode_format2b((ins),(aval),(condval),6,(displ))
+#define sparc_branchp(ins,aval,condval,xcc,predict,displ) sparc_encode_format2c((ins),(aval),(condval),0x1,(xcc),(predict),(displ))
+
+#define sparc_brz(ins,aval,predict,rs1,disp) sparc_encode_format2d((ins), (aval),0x1,0x3,(predict),(rs1),(disp))
+#define sparc_brlez(ins,aval,predict,rs1,disp) sparc_encode_format2d((ins), (aval),0x2,0x3,(predict),(rs1),(disp))
+#define sparc_brlz(ins,aval,predict,rs1,disp) sparc_encode_format2d((ins), (aval),0x3,0x3,(predict),(rs1),(disp))
+#define sparc_brnz(ins,aval,predict,rs1,disp) sparc_encode_format2d((ins), (aval),0x5,0x3,(predict),(rs1),(disp))
+#define sparc_brgz(ins,aval,predict,rs1,disp) sparc_encode_format2d((ins), (aval),0x6,0x3,(predict),(rs1),(disp))
+#define sparc_brgez(ins,aval,predict,rs1,disp) sparc_encode_format2d((ins), (aval),0x7,0x3,(predict),(rs1),(disp))
+
+/* conditional moves */
+#define sparc_movcc(ins,cc,condval,r1,dest) sparc_encode_format4c((ins), 0x2, 0x2c, cc, condval, r1, dest)
+
+#define sparc_movcc_imm(ins,cc,condval,imm,dest) sparc_encode_format4d((ins), 0x2, 0x2c, cc, condval, imm, dest)
+
+/* synthetic instructions */
+#define sparc_cmp(ins,r1,r2) sparc_sub((ins),sparc_cc,(r1),(r2),sparc_g0)
+#define sparc_cmp_imm(ins,r1,imm) sparc_sub_imm((ins),sparc_cc,(r1),(imm),sparc_g0)
+#define sparc_jmp(ins,base,disp) sparc_jmpl((ins),(base),(disp),sparc_g0)
+#define sparc_jmp_imm(ins,base,disp) sparc_jmpl_imm((ins),(base),(disp),sparc_g0)
+#define sparc_call(ins,base,disp) sparc_jmpl((ins),(base),(disp),sparc_o7)
+#define sparc_call_imm(ins,base,disp) sparc_jmpl_imm((ins),(base),(disp),sparc_o7)
+
+#define sparc_test(ins,reg) sparc_or ((ins),sparc_cc,sparc_g0,(reg),sparc_g0)
+
+#define sparc_ret(ins) sparc_jmpl_imm((ins),sparc_i7,8,sparc_g0)
+#define sparc_retl(ins) sparc_jmpl_imm((ins),sparc_o7,8,sparc_g0)
+#define sparc_restore_simple(ins) sparc_restore((ins),sparc_g0,sparc_g0,sparc_g0)
+#define sparc_rett_simple(ins) sparc_rett_imm((ins),sparc_i7,8)
+
+#define sparc_set32(ins,val,reg) \
+ do { \
+ if ((val) == 0) \
+ sparc_clr_reg((ins),(reg)); \
+ else if (((guint32)(val) & 0x3ff) == 0) \
+ sparc_sethi((ins),(guint32)(val),(reg)); \
+ else if (((gint32)(val) >= -4096) && ((gint32)(val) <= 4095)) \
+ sparc_or_imm((ins),FALSE,sparc_g0,(gint32)(val),(reg)); \
+ else { \
+ sparc_sethi((ins),(guint32)(val),(reg)); \
+ sparc_or_imm((ins),FALSE,(reg),(guint32)(val)&0x3ff,(reg)); \
+ } \
+ } while (0)
+
+#ifdef SPARCV9
+#define SPARC_SET_MAX_SIZE (6 * 4)
+#else
+#define SPARC_SET_MAX_SIZE (2 * 4)
+#endif
+
+#if SPARCV9
+#define sparc_set(ins,ptr,reg) \
+ do { \
+ g_assert ((reg) != sparc_g1); \
+ gint64 val = (gint64)ptr; \
+ guint32 top_word = (val) >> 32; \
+ guint32 bottom_word = (val) & 0xffffffff; \
+ if (val == 0) \
+ sparc_clr_reg ((ins), reg); \
+ else if ((val >= -4096) && ((val) <= 4095)) \
+ sparc_or_imm((ins),FALSE,sparc_g0,bottom_word,(reg)); \
+ else if ((val >= 0) && (val <= 4294967295L)) { \
+ sparc_sethi((ins),bottom_word,(reg)); \
+ if (bottom_word & 0x3ff) \
+ sparc_or_imm((ins),FALSE,(reg),bottom_word&0x3ff,(reg)); \
+ } \
+ else if ((val >= 0) && (val <= (1L << 44) - 1)) { \
+ sparc_sethi ((ins), (val >> 12), (reg)); \
+ sparc_or_imm ((ins), FALSE, (reg), (val >> 12) & 0x3ff, (reg)); \
+ sparc_sllx_imm ((ins),(reg), 12, (reg)); \
+ sparc_or_imm ((ins), FALSE, (reg), (val) & 0xfff, (reg)); \
+ } \
+ else if (top_word == 0xffffffff) { \
+ sparc_xnor ((ins), FALSE, sparc_g0, sparc_g0, sparc_g1); \
+ sparc_sethi((ins),bottom_word,(reg)); \
+ sparc_sllx_imm((ins),sparc_g1,32,sparc_g1); \
+ sparc_or_imm((ins),FALSE,(reg),bottom_word&0x3ff,(reg)); \
+ sparc_or((ins),FALSE,(reg),sparc_g1,(reg)); \
+ } \
+ else { \
+ sparc_sethi((ins),top_word,sparc_g1); \
+ sparc_sethi((ins),bottom_word,(reg)); \
+ sparc_or_imm((ins),FALSE,sparc_g1,top_word&0x3ff,sparc_g1); \
+ sparc_or_imm((ins),FALSE,(reg),bottom_word&0x3ff,(reg)); \
+ sparc_sllx_imm((ins),sparc_g1,32,sparc_g1); \
+ sparc_or((ins),FALSE,(reg),sparc_g1,(reg)); \
+ } \
+ } while (0)
+#else
+#define sparc_set(ins,val,reg) \
+ do { \
+ if ((val) == 0) \
+ sparc_clr_reg((ins),(reg)); \
+ else if (((guint32)(val) & 0x3ff) == 0) \
+ sparc_sethi((ins),(guint32)(val),(reg)); \
+ else if (((gint32)(val) >= -4096) && ((gint32)(val) <= 4095)) \
+ sparc_or_imm((ins),FALSE,sparc_g0,(gint32)(val),(reg)); \
+ else { \
+ sparc_sethi((ins),(guint32)(val),(reg)); \
+ sparc_or_imm((ins),FALSE,(reg),(guint32)(val)&0x3ff,(reg)); \
+ } \
+ } while (0)
+#endif
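+
+/* note: on V9, sparc_set uses %g1 as scratch for full 64-bit
+ * constants, so the destination register must not be %g1 (the
+ * g_assert above enforces this)
+ */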
+
+#define sparc_set_ptr(ins,val,reg) sparc_set(ins,val,reg)
+
+#ifdef SPARCV9
+#define sparc_set_template(ins,reg) sparc_set (ins,0x7fffffff7fffffff, reg)
+#else
+#define sparc_set_template(ins,reg) sparc_set (ins,0x7fffffff, reg)
+#endif
+
+#define sparc_not(ins,reg) sparc_xnor((ins),FALSE,(reg),sparc_g0,(reg))
+#define sparc_neg(ins,reg) sparc_sub((ins),FALSE,sparc_g0,(reg),(reg))
+#define sparc_clr_reg(ins,reg) sparc_or((ins),FALSE,sparc_g0,sparc_g0,(reg))
+
+#define sparc_mov_reg_reg(ins,src,dest) sparc_or((ins),FALSE,sparc_g0,(src),(dest))
+
+#ifdef SPARCV9
+#define sparc_sti_imm sparc_stx_imm
+#define sparc_ldi_imm sparc_ldx_imm
+#define sparc_sti sparc_stx
+#define sparc_ldi sparc_ldx
+#else
+#define sparc_sti_imm sparc_st_imm
+#define sparc_ldi_imm sparc_ld_imm
+#define sparc_sti sparc_st
+#define sparc_ldi sparc_ld
+#endif
+
+#endif /* __SPARC_CODEGEN_H__ */
+
diff --git a/lib/ffts/src/arch/sparc/test.c b/lib/ffts/src/arch/sparc/test.c
new file mode 100644
index 0000000..0d4ad18
--- /dev/null
+++ b/lib/ffts/src/arch/sparc/test.c
@@ -0,0 +1,123 @@
+#include <glib.h>
+#include "sparc-codegen.h"
+
+/* don't run the resulting program, it will destroy your computer,
+ * just objdump -d it to check that we generated the correct assembly.
+ */
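+
+/* a possible build/inspect recipe (flags depend on the local glib):
+ *   cc test.c -o gen $(pkg-config --cflags --libs glib-2.0)
+ *   ./gen > test.s && as test.s -o test.o && objdump -d test.o
+ */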
+
+int
+main ()
+{
+ guint32 *p;
+ guint32 code_buffer [500];
+ guint32 local_size = 0, stack_size = 0, code_size = 6;
+ guint32 arg_pos, simpletype;
+ unsigned char *ins;
+ int i, stringp, cur_out_reg, size;
+
+ p = code_buffer;
+
+ printf (".text\n.align 4\n.globl main\n.type main,@function\nmain:\n");
+
+ /*
+ * Standard function prolog.
+ */
+ sparc_save_imm (p, sparc_sp, -112-stack_size, sparc_sp);
+ cur_out_reg = sparc_o0;
+ arg_pos = 0;
+
+ if (1) {
+ sparc_mov_reg_reg (p, sparc_i2, cur_out_reg);
+ ++cur_out_reg;
+ }
+
+ sparc_ld_imm (p, sparc_i3, arg_pos, cur_out_reg);
+ ++cur_out_reg;
+ sparc_ld_imm (p, sparc_i3, arg_pos+4, cur_out_reg);
+ ++cur_out_reg;
+ /*
+ * Insert call to function
+ */
+ sparc_jmpl (p, sparc_i0, 0, sparc_callsite);
+ sparc_nop (p);
+
+ sparc_jmpl_imm (p, sparc_i7, 8, sparc_zero);
+ sparc_restore (p, sparc_zero, sparc_zero, sparc_zero);
+
+ sparc_ldsb (p, sparc_i3, sparc_l0, sparc_o5);
+ sparc_ldsb_imm (p, sparc_i3, 2, sparc_o5);
+
+ sparc_ldsh (p, sparc_i3, sparc_l0, sparc_o5);
+ sparc_ldsh_imm (p, sparc_i3, 2, sparc_o5);
+
+ sparc_ldub (p, sparc_i3, sparc_l0, sparc_o5);
+ sparc_ldub_imm (p, sparc_i3, 2, sparc_o5);
+
+ sparc_lduh (p, sparc_i3, sparc_l0, sparc_o5);
+ sparc_lduh_imm (p, sparc_i3, 2, sparc_o5);
+
+ sparc_ldf (p, sparc_i3, sparc_l0, sparc_o5);
+ sparc_ldf_imm (p, sparc_i3, 2, sparc_o5);
+
+ sparc_stb (p, sparc_i3, sparc_l0, sparc_l2);
+ sparc_stb_imm (p, sparc_i3, sparc_o5, 2);
+
+ sparc_sethi (p, 0xff000000, sparc_o2);
+ sparc_rdy (p, sparc_l0);
+ sparc_wry (p, sparc_l0, sparc_l1);
+ sparc_wry_imm (p, sparc_l0, 16);
+ sparc_stbar (p);
+ sparc_unimp (p, 24);
+ sparc_flush (p, sparc_l4, 0);
+
+ sparc_and (p, sparc_cc, sparc_l0, sparc_l1, sparc_o1);
+ sparc_and_imm (p, FALSE, sparc_l0, 0xff, sparc_o1);
+ sparc_andn (p, sparc_cc, sparc_l0, sparc_l1, sparc_o1);
+ sparc_or (p, sparc_cc, sparc_l0, sparc_l1, sparc_o1);
+ sparc_orn (p, sparc_cc, sparc_l0, sparc_l1, sparc_o1);
+ sparc_xor (p, sparc_cc, sparc_l0, sparc_l1, sparc_o1);
+ sparc_xnor (p, sparc_cc, sparc_l0, sparc_l1, sparc_o1);
+
+ sparc_sll (p, sparc_l0, sparc_l1, sparc_o1);
+ sparc_sll_imm (p, sparc_l0, 2, sparc_o1);
+ sparc_srl (p, sparc_l0, sparc_l1, sparc_o1);
+ sparc_srl_imm (p, sparc_l0, 2, sparc_o1);
+ sparc_sra (p, sparc_l0, sparc_l1, sparc_o1);
+ sparc_sra_imm (p, sparc_l0, 2, sparc_o1);
+
+ sparc_add (p, sparc_cc, sparc_l0, sparc_l1, sparc_o1);
+ sparc_add_imm (p, FALSE, sparc_l0, 0xff, sparc_o1);
+ sparc_addx (p, sparc_cc, sparc_l0, sparc_l1, sparc_o1);
+ sparc_sub (p, sparc_cc, sparc_l0, sparc_l1, sparc_o1);
+ sparc_subx (p, sparc_cc, sparc_l0, sparc_l1, sparc_o1);
+
+ sparc_muls (p, sparc_l0, sparc_l1, sparc_o1);
+ sparc_umul (p, sparc_cc, sparc_l0, sparc_l1, sparc_o1);
+ sparc_smul (p, sparc_cc, sparc_l0, sparc_l1, sparc_o1);
+ sparc_udiv (p, sparc_cc, sparc_l0, sparc_l1, sparc_o1);
+ sparc_sdiv (p, sparc_cc, sparc_l0, sparc_l1, sparc_o1);
+
+ sparc_branch (p, FALSE, sparc_bne, -12);
+ sparc_ret (p);
+ sparc_retl (p);
+ sparc_test (p, sparc_l4);
+ sparc_cmp (p, sparc_l4, sparc_l6);
+ sparc_cmp_imm (p, sparc_l4, 4);
+ sparc_restore_simple (p);
+
+ sparc_set (p, 0xff000000, sparc_l7);
+ sparc_set (p, 1, sparc_l7);
+ sparc_set (p, 0xff0000ff, sparc_l7);
+
+ sparc_not (p, sparc_g2);
+ sparc_neg (p, sparc_g3);
+ sparc_clr_reg (p, sparc_g4);
+
+
+ size = (p-code_buffer)*4;
+	ins = (unsigned char *) code_buffer;
+ for (i = 0; i < size; ++i)
+ printf (".byte %d\n", (unsigned int) ins [i]);
+ return 0;
+}
+
diff --git a/lib/ffts/src/arch/sparc/tramp.c b/lib/ffts/src/arch/sparc/tramp.c
new file mode 100644
index 0000000..19c0a78
--- /dev/null
+++ b/lib/ffts/src/arch/sparc/tramp.c
@@ -0,0 +1,1080 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: t; c-basic-offset: 8 -*- */
+/*
+ * Create trampolines to invoke arbitrary functions.
+ *
+ * Copyright (C) Ximian Inc.
+ *
+ * Authors: Paolo Molaro (lupus@ximian.com)
+ * Jeffrey Stedfast <fejj@ximian.com>
+ * Mark Crichton <crichton@gimp.org>
+ *
+ */
+
+#include "config.h"
+#include <stdlib.h>
+#include <string.h>
+#include "sparc-codegen.h"
+#include "mono/metadata/class.h"
+#include "mono/metadata/tabledefs.h"
+#include "mono/interpreter/interp.h"
+#include "mono/metadata/appdomain.h"
+#include "mono/metadata/debug-helpers.h"
+#include "mono/metadata/marshal.h"
+
+
+#define ARG_SIZE sizeof (stackval)
+#define PROLOG_INS 1
+#define CALL_INS 3 /* Max 3. 1 for the jmpl and 1 for the nop and 1 for the possible unimp */
+#define EPILOG_INS 2
+#define FLOAT_REGS 32
+#define OUT_REGS 6
+#define LOCAL_REGS 8
+#define SLOT_SIZE sizeof(gpointer)
+#if SPARCV9
+#define MINIMAL_STACK_SIZE 22
+#define BIAS 2047
+#define FRAME_ALIGN 16
+#else
+#define MINIMAL_STACK_SIZE 23
+#define BIAS 0
+#define FRAME_ALIGN 8
+#endif
+
+#define NOT_IMPL(x) g_error("FIXME: %s", x);
+/*#define DEBUG(a) a*/
+#define DEBUG(a)
+
+/* Some assembly... */
+#ifdef __GNUC__
+#define flushi(addr) __asm__ __volatile__ ("flush %0"::"r"(addr):"memory")
+#else
+static void flushi(void *addr)
+{
+ asm("flush %i0");
+}
+#endif
+
+static char*
+sig_to_name (MonoMethodSignature *sig, const char *prefix)
+{
+ int i;
+ char *result;
+ GString *res = g_string_new ("");
+ char *p;
+
+ if (prefix) {
+ g_string_append (res, prefix);
+ g_string_append_c (res, '_');
+ }
+
+ mono_type_get_desc (res, sig->ret, TRUE);
+
+ for (i = 0; i < sig->param_count; ++i) {
+ g_string_append_c (res, '_');
+ mono_type_get_desc (res, sig->params [i], TRUE);
+ }
+ result = res->str;
+ p = result;
+	/* remove chars Sun's assembler doesn't like */
+ while (*p != '\0') {
+ if (*p == '.' || *p == '/')
+ *p = '_';
+ else if (*p == '&')
+ *p = '$';
+ else if (*p == '[' || *p == ']')
+ *p = 'X';
+ p++;
+ }
+ g_string_free (res, FALSE);
+ return result;
+}
+
+static void
+sparc_disassemble_code (guint32 *code_buffer, guint32 *p, const char *id)
+{
+ guchar *cp;
+ FILE *ofd;
+
+ if (!(ofd = fopen ("/tmp/test.s", "w")))
+ g_assert_not_reached();
+
+ fprintf (ofd, "%s:\n", id);
+
+ for (cp = (guchar *)code_buffer; cp < (guchar *)p; cp++)
+ fprintf (ofd, ".byte %d\n", *cp);
+
+ fclose (ofd);
+
+#ifdef __GNUC__
+ system ("as /tmp/test.s -o /tmp/test.o;objdump -d /tmp/test.o");
+#else
+ /* this assumes we are using Sun tools as we aren't GCC */
+#if SPARCV9
+ system ("as -xarch=v9 /tmp/test.s -o /tmp/test.o;dis /tmp/test.o");
+#else
+ system ("as /tmp/test.s -o /tmp/test.o;dis /tmp/test.o");
+#endif
+#endif
+}
+
+
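+/* account for one integer argument: a simple (word-sized) argument
+ * takes the next out register or, once OUT_REGS are exhausted, a
+ * stack slot; a 64-bit argument (simple == FALSE) consumes two
+ * registers/slots and a longer copy sequence
+ */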
+static void
+add_general (guint *gr, guint *stack_size, guint *code_size, gboolean simple)
+{
+ if (simple) {
+ if (*gr >= OUT_REGS) {
+ *stack_size += SLOT_SIZE;
+ *code_size += 12;
+ } else {
+ *code_size += 4;
+ }
+ } else {
+ if (*gr >= OUT_REGS - 1) {
+ *stack_size += 8 + (*stack_size % 8); /* ???64 */
+ *code_size += 16;
+ } else {
+ *code_size += 16;
+ }
+ (*gr)++;
+ }
+ (*gr)++;
+}
+
+static void
+calculate_sizes (MonoMethodSignature *sig, guint *stack_size, guint *code_size,
+ gboolean string_ctor, gboolean *use_memcpy)
+{
+ guint i, fr, gr;
+ guint32 simpletype;
+
+ fr = gr = 0;
+ *stack_size = MINIMAL_STACK_SIZE * SLOT_SIZE;
+ *code_size = (PROLOG_INS + CALL_INS + EPILOG_INS) * 4;
+
+ /* function arguments */
+ if (sig->hasthis)
+ add_general (&gr, stack_size, code_size, TRUE);
+
+ for (i = 0; i < sig->param_count; i++) {
+ if (sig->params[i]->byref) {
+ add_general (&gr, stack_size, code_size, TRUE);
+ continue;
+ }
+ simpletype = sig->params[i]->type;
+ enum_calc_size:
+ switch (simpletype) {
+ case MONO_TYPE_R4:
+#if SPARCV9
+ (*code_size) += 4; /* for the fdtos */
+#else
+ (*code_size) += 12;
+ (*stack_size) += 4;
+#endif
+ case MONO_TYPE_BOOLEAN:
+ case MONO_TYPE_CHAR:
+ case MONO_TYPE_I1:
+ case MONO_TYPE_U1:
+ case MONO_TYPE_I2:
+ case MONO_TYPE_U2:
+ case MONO_TYPE_I4:
+ case MONO_TYPE_U4:
+ case MONO_TYPE_I:
+ case MONO_TYPE_U:
+ case MONO_TYPE_PTR:
+ case MONO_TYPE_CLASS:
+ case MONO_TYPE_OBJECT:
+ case MONO_TYPE_STRING:
+ case MONO_TYPE_SZARRAY:
+ add_general (&gr, stack_size, code_size, TRUE);
+ break;
+ case MONO_TYPE_VALUETYPE: {
+ gint size;
+ guint32 align;
+ if (sig->params[i]->data.klass->enumtype) {
+ simpletype = sig->params[i]->data.klass->enum_basetype->type;
+ goto enum_calc_size;
+ }
+ size = mono_class_native_size (sig->params[i]->data.klass, &align);
+#if SPARCV9
+ if (size != 4) {
+#else
+ if (1) {
+#endif
+ DEBUG(fprintf(stderr, "copy %d byte struct on stack\n", size));
+ *use_memcpy = TRUE;
+ *code_size += 8*4;
+
+ *stack_size = (*stack_size + (align - 1)) & (~(align -1));
+ *stack_size += (size + 3) & (~3);
+ if (gr > OUT_REGS) {
+ *code_size += 4;
+ *stack_size += 4;
+ }
+ } else {
+ add_general (&gr, stack_size, code_size, TRUE);
+#if SPARCV9
+ *code_size += 8;
+#else
+ *code_size += 4;
+#endif
+ }
+ break;
+ }
+ case MONO_TYPE_I8:
+ case MONO_TYPE_R8:
+ add_general (&gr, stack_size, code_size, FALSE);
+ break;
+ default:
+ g_error ("Can't trampoline 0x%x", sig->params[i]->type);
+ }
+ }
+
+ /* function return value */
+ if (sig->ret->byref || string_ctor) {
+ *code_size += 8;
+ } else {
+ simpletype = sig->ret->type;
+ enum_retvalue:
+ switch (simpletype) {
+ case MONO_TYPE_BOOLEAN:
+ case MONO_TYPE_I1:
+ case MONO_TYPE_U1:
+ case MONO_TYPE_I2:
+ case MONO_TYPE_U2:
+ case MONO_TYPE_CHAR:
+ case MONO_TYPE_I4:
+ case MONO_TYPE_U4:
+ case MONO_TYPE_I:
+ case MONO_TYPE_U:
+ case MONO_TYPE_CLASS:
+ case MONO_TYPE_OBJECT:
+ case MONO_TYPE_PTR:
+ case MONO_TYPE_STRING:
+ case MONO_TYPE_R4:
+ case MONO_TYPE_R8:
+ case MONO_TYPE_SZARRAY:
+ case MONO_TYPE_ARRAY:
+ *code_size += 8;
+ break;
+ case MONO_TYPE_I8:
+ *code_size += 12;
+ break;
+ case MONO_TYPE_VALUETYPE: {
+ gint size;
+ if (sig->ret->data.klass->enumtype) {
+ simpletype = sig->ret->data.klass->enum_basetype->type;
+ goto enum_retvalue;
+ }
+ size = mono_class_native_size (sig->ret->data.klass, NULL);
+#if SPARCV9
+ if (size <= 32)
+ *code_size += 8 + (size + 7) / 2;
+ else
+ *code_size += 8;
+#else
+ *code_size += 8;
+#endif
+ break;
+ }
+ case MONO_TYPE_VOID:
+ break;
+ default:
+ g_error ("Can't handle as return value 0x%x", sig->ret->type);
+ }
+ }
+
+ if (*use_memcpy) {
+ *stack_size += 8;
+ *code_size += 24;
+ if (sig->hasthis) {
+ *stack_size += SLOT_SIZE;
+ *code_size += 4;
+ }
+ }
+
+ *stack_size = (*stack_size + (FRAME_ALIGN - 1)) & (~(FRAME_ALIGN -1));
+}
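+
+/*
+ * calculate_sizes above gives an upper bound on the frame and code
+ * buffer sizes; mono_arch_create_trampoline then allocates a buffer of
+ * that size and the emit_* helpers below fill it in a second pass over
+ * the same signature.
+ */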
+
+static inline guint32 *
+emit_epilog (guint32 *p, MonoMethodSignature *sig, guint stack_size)
+{
+ int ret_offset = 8;
+
+ /*
+ * Standard epilog.
+ * 8 may be 12 when returning structures (to skip unimp opcode).
+ */
+#if !SPARCV9
+ if (sig != NULL && !sig->ret->byref && sig->ret->type == MONO_TYPE_VALUETYPE && !sig->ret->data.klass->enumtype)
+ ret_offset = 12;
+#endif
+ sparc_jmpl_imm (p, sparc_i7, ret_offset, sparc_zero);
+ sparc_restore (p, sparc_zero, sparc_zero, sparc_zero);
+
+ return p;
+}
+
+static inline guint32 *
+emit_prolog (guint32 *p, MonoMethodSignature *sig, guint stack_size)
+{
+ /* yes kids, it is this simple! */
+ sparc_save_imm (p, sparc_sp, -stack_size, sparc_sp);
+ return p;
+}
+
+#if SPARCV9
+#define sparc_st_ptr(a,b,c,d) sparc_stx(a,b,c,d)
+#define sparc_st_imm_ptr(a,b,c,d) sparc_stx_imm(a,b,c,d)
+#define sparc_ld_ptr(a,b,c,d) sparc_ldx(a,b,c,d)
+#define sparc_ld_imm_ptr(a,b,c,d) sparc_ldx_imm(a,b,c,d)
+#else
+#define sparc_st_ptr(a,b,c,d) sparc_st(a,b,c,d)
+#define sparc_st_imm_ptr(a,b,c,d) sparc_st_imm(a,b,c,d)
+#define sparc_ld_ptr(a,b,c,d) sparc_ld(a,b,c,d)
+#define sparc_ld_imm_ptr(a,b,c,d) sparc_ld_imm(a,b,c,d)
+#endif
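+
+/*
+ * The _ptr macros pick the natural pointer width: ldx/stx are the
+ * 64-bit extended loads/stores on V9, while a V8 pointer fits plain
+ * 32-bit ld/st. E.g. sparc_st_imm_ptr (p, sparc_l0, sparc_sp, off)
+ * expands to sparc_stx_imm on V9 and to sparc_st_imm on V8.
+ */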
+
+/* synonyms for when values are really widened scalar values */
+#define sparc_st_imm_word sparc_st_imm_ptr
+
+#define ARG_BASE sparc_i3 /* pointer to args in i3 */
+#define SAVE_PTR_IN_GENERIC_REGISTER \
+ if (gr < OUT_REGS) { \
+ sparc_ld_imm_ptr (p, ARG_BASE, i*ARG_SIZE, sparc_o0 + gr); \
+ gr++; \
+ } else { \
+ sparc_ld_imm_ptr (p, ARG_BASE, i*ARG_SIZE, sparc_l0); \
+ sparc_st_imm_ptr (p, sparc_l0, sparc_sp, stack_par_pos); \
+ stack_par_pos += SLOT_SIZE; \
+ }
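+
+/*
+ * The SPARC ABI passes the first six integer/pointer arguments in
+ * %o0-%o5 (OUT_REGS); later arguments go to the outgoing argument
+ * area of the caller's frame, which stack_par_pos tracks.
+ */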
+
+#if SPARCV9
+/* This is a half-hearted attempt at coping with structs passed by value:
+   the actual convention is complicated when floats & doubles are involved,
+   as you end up with fields in different registers and on the stack.
+   It will take more time to get right... */
+static guint32 *
+v9_struct_arg(guint32 *p, int arg_index, MonoClass *klass, int size, guint *p_gr)
+{
+ MonoMarshalType *info = mono_marshal_load_type_info (klass);
+ int off = 0;
+ int index = 0;
+ guint gr = *p_gr;
+ sparc_ld_imm_ptr (p, ARG_BASE, arg_index*ARG_SIZE, sparc_l0);
+ if (size > 8) {
+ if (info->fields [index].field->type->type == MONO_TYPE_R8) {
+ sparc_lddf_imm (p, sparc_l0, 0, sparc_f0 + 2 * gr);
+ index++;
+ }
+ else {
+ sparc_ldx_imm (p, sparc_l0, 0, sparc_o0 + gr);
+ index++; /* FIXME could be multiple fields in one register */
+ }
+ gr++;
+ size -= 8;
+ off = 8;
+ }
+ if (size > 0) {
+ if (info->fields [index].field->type->type == MONO_TYPE_R8) {
+ sparc_lddf_imm (p, sparc_l0, off, sparc_f0 + 2 * gr);
+ index++;
+ }
+ else {
+			/* will load extra garbage off the end of short structs ... */
+ sparc_ldx_imm (p, sparc_l0, off, sparc_o0 + gr);
+ }
+ gr++;
+ }
+ *p_gr = gr;
+ return p;
+}
+#endif
+
+static inline guint32*
+emit_save_parameters (guint32 *p, MonoMethodSignature *sig, guint stack_size,
+ gboolean use_memcpy)
+{
+ guint i, fr, gr, stack_par_pos, struct_pos, cur_struct_pos;
+ guint32 simpletype;
+
+ fr = gr = 0;
+ stack_par_pos = MINIMAL_STACK_SIZE * SLOT_SIZE + BIAS;
+
+ if (sig->hasthis) {
+ if (use_memcpy) {
+ /* we don't need to save a thing. */
+ } else
+ sparc_mov_reg_reg (p, sparc_i2, sparc_o0);
+ gr ++;
+ }
+
+ if (use_memcpy) {
+ cur_struct_pos = struct_pos = stack_par_pos;
+ for (i = 0; i < sig->param_count; i++) {
+ if (sig->params[i]->byref)
+ continue;
+ if (sig->params[i]->type == MONO_TYPE_VALUETYPE &&
+ !sig->params[i]->data.klass->enumtype) {
+ gint size;
+ guint32 align;
+
+ size = mono_class_native_size (sig->params[i]->data.klass, &align);
+#if SPARCV9
+ if (size != 4) {
+#else
+ if (1) {
+#endif
+ /* Add alignment */
+ stack_par_pos = (stack_par_pos + (align - 1)) & (~(align - 1));
+ /* need to call memcpy here */
+ sparc_add_imm (p, 0, sparc_sp, stack_par_pos, sparc_o0);
+ sparc_ld_imm_ptr (p, sparc_i3, i*16, sparc_o1);
+ sparc_set (p, (guint32)size, sparc_o2);
+ sparc_set_ptr (p, (void *)memmove, sparc_l0);
+ sparc_jmpl_imm (p, sparc_l0, 0, sparc_callsite);
+ sparc_nop (p);
+ stack_par_pos += (size + (SLOT_SIZE - 1)) & (~(SLOT_SIZE - 1));
+ }
+ }
+ }
+ }
+
+ if (sig->ret->type == MONO_TYPE_VALUETYPE && !sig->ret->byref) {
+ MonoClass *klass = sig->ret->data.klass;
+ if (!klass->enumtype) {
+ gint size = mono_class_native_size (klass, NULL);
+
+ DEBUG(fprintf(stderr, "retval value type size: %d\n", size));
+#if SPARCV9
+ if (size > 32) {
+#else
+ {
+#endif
+ /* pass on buffer in interp.c to called function */
+ sparc_ld_imm_ptr (p, sparc_i1, 0, sparc_l0);
+ sparc_st_imm_ptr (p, sparc_l0, sparc_sp, 64);
+ }
+ }
+ }
+
+	DEBUG(fprintf(stderr, "%s\n", sig_to_name(sig, NULL)));
+
+ for (i = 0; i < sig->param_count; i++) {
+ if (sig->params[i]->byref) {
+ SAVE_PTR_IN_GENERIC_REGISTER;
+ continue;
+ }
+ simpletype = sig->params[i]->type;
+ enum_calc_size:
+ switch (simpletype) {
+ case MONO_TYPE_BOOLEAN:
+ case MONO_TYPE_I1:
+ case MONO_TYPE_U1:
+ case MONO_TYPE_I2:
+ case MONO_TYPE_U2:
+ case MONO_TYPE_CHAR:
+ case MONO_TYPE_I4:
+ case MONO_TYPE_U4:
+ if (gr < OUT_REGS) {
+ sparc_ld_imm (p, ARG_BASE, i*ARG_SIZE, sparc_o0 + gr);
+ gr++;
+ } else {
+ sparc_ld_imm (p, ARG_BASE, i*ARG_SIZE, sparc_l0);
+ sparc_st_imm_word (p, sparc_l0, sparc_sp, stack_par_pos);
+ stack_par_pos += SLOT_SIZE;
+ }
+ break;
+
+ case MONO_TYPE_R4:
+#if SPARCV9
+ sparc_lddf_imm (p, ARG_BASE, i*ARG_SIZE, sparc_f30); /* fix using this fixed reg */
+ sparc_fdtos(p, sparc_f30, sparc_f0 + 2 * gr + 1);
+ gr++;
+ break;
+#else
+ /* Convert from double to single */
+ sparc_lddf_imm (p, ARG_BASE, i*ARG_SIZE, sparc_f0);
+ sparc_fdtos (p, sparc_f0, sparc_f0);
+
+ /*
+ * FIXME: Is there an easier way to do an
+ * freg->ireg move ?
+ */
+ sparc_stf_imm (p, sparc_f0, sparc_sp, stack_par_pos);
+
+ if (gr < OUT_REGS) {
+ sparc_ld_imm (p, sparc_sp, stack_par_pos, sparc_o0 + gr);
+ gr++;
+ } else {
+ sparc_ldf_imm (p, sparc_sp, stack_par_pos, sparc_f0);
+ sparc_stf_imm (p, sparc_f0, sparc_sp, stack_par_pos);
+ stack_par_pos += SLOT_SIZE;
+ }
+ break;
+#endif
+
+ case MONO_TYPE_I:
+ case MONO_TYPE_U:
+ case MONO_TYPE_PTR:
+ case MONO_TYPE_CLASS:
+ case MONO_TYPE_OBJECT:
+ case MONO_TYPE_STRING:
+ case MONO_TYPE_SZARRAY:
+ SAVE_PTR_IN_GENERIC_REGISTER;
+ break;
+ case MONO_TYPE_VALUETYPE: {
+ gint size;
+ guint32 align;
+ MonoClass *klass = sig->params[i]->data.klass;
+ if (klass->enumtype) {
+ simpletype = klass->enum_basetype->type;
+ goto enum_calc_size;
+ }
+ size = mono_class_native_size (klass, &align);
+#if SPARCV9
+ if (size <= 16) {
+ if (gr < OUT_REGS) {
+ p = v9_struct_arg(p, i, klass, size, &gr);
+ } else {
+ sparc_ld_imm_ptr (p, ARG_BASE, i*ARG_SIZE, sparc_l0);
+ sparc_ld_imm (p, sparc_l0, 0, sparc_l0);
+ sparc_st_imm_word (p, sparc_l0, sparc_sp, stack_par_pos);
+ stack_par_pos += SLOT_SIZE;
+ }
+ break;
+ }
+#else
+ /*
+ * FIXME: The 32bit ABI docs do not mention that small
+ * structures are passed in registers.
+ */
+
+ /*
+ if (size == 4) {
+ if (gr < OUT_REGS) {
+ sparc_ld_imm_ptr (p, ARG_BASE, i*ARG_SIZE, sparc_l0);
+ sparc_ld_imm (p, sparc_l0, 0, sparc_o0 + gr);
+ gr++;
+ } else {
+ sparc_ld_imm_ptr (p, ARG_BASE, i*ARG_SIZE, sparc_l0);
+ sparc_ld_imm (p, sparc_l0, 0, sparc_l0);
+ sparc_st_imm_word (p, sparc_l0, sparc_sp, stack_par_pos);
+ stack_par_pos += SLOT_SIZE;
+ }
+ break;
+ }
+ */
+#endif
+
+ cur_struct_pos = (cur_struct_pos + (align - 1)) & (~(align - 1));
+ if (gr < OUT_REGS) {
+ sparc_add_imm (p, 0, sparc_sp,
+ cur_struct_pos, sparc_o0 + gr);
+ gr ++;
+ } else {
+ sparc_ld_imm_ptr (p, sparc_sp,
+ cur_struct_pos,
+ sparc_l1);
+ sparc_st_imm_ptr (p, sparc_l1,
+ sparc_sp,
+ stack_par_pos);
+ }
+ cur_struct_pos += (size + (SLOT_SIZE - 1)) & (~(SLOT_SIZE - 1));
+ break;
+ }
+
+#if SPARCV9
+ case MONO_TYPE_I8:
+ if (gr < OUT_REGS) {
+ sparc_ldx_imm (p, ARG_BASE, i*ARG_SIZE, sparc_o0 + gr);
+ gr++;
+ } else {
+ sparc_ldx_imm (p, ARG_BASE, i*ARG_SIZE, sparc_l0);
+ sparc_stx_imm (p, sparc_l0, sparc_sp, stack_par_pos);
+ stack_par_pos += SLOT_SIZE;
+ }
+ break;
+ case MONO_TYPE_R8:
+ sparc_lddf_imm (p, ARG_BASE, i*ARG_SIZE, sparc_f0 + 2 * i);
+ break;
+#else
+ case MONO_TYPE_I8:
+ case MONO_TYPE_R8:
+ if (gr < (OUT_REGS - 1)) {
+ sparc_ld_imm (p, ARG_BASE, i*ARG_SIZE, sparc_o0 + gr);
+ gr ++;
+
+ sparc_ld_imm (p, ARG_BASE,
+ (i*ARG_SIZE) + 4,
+ sparc_o0 + gr);
+ gr ++;
+ } else if (gr == (OUT_REGS - 1)) {
+ /* Split register/stack */
+ sparc_ld_imm (p, ARG_BASE, i*ARG_SIZE, sparc_o0 + gr);
+ gr ++;
+
+ sparc_ld_imm (p, ARG_BASE, (i*ARG_SIZE) + 4, sparc_l0);
+ sparc_st_imm (p, sparc_l0, sparc_sp, stack_par_pos);
+ stack_par_pos += SLOT_SIZE;
+ } else {
+ sparc_ld_imm (p, ARG_BASE, i*ARG_SIZE, sparc_l0);
+ sparc_st_imm (p, sparc_l0, sparc_sp, stack_par_pos);
+ stack_par_pos += SLOT_SIZE;
+
+ sparc_ld_imm (p, ARG_BASE, (i*ARG_SIZE) + 4, sparc_l0);
+ sparc_st_imm (p, sparc_l0, sparc_sp, stack_par_pos);
+ stack_par_pos += SLOT_SIZE;
+ }
+ break;
+#endif
+ default:
+ g_error ("Can't trampoline 0x%x", sig->params[i]->type);
+ }
+ }
+
+ g_assert ((stack_par_pos - BIAS) <= stack_size);
+
+ return p;
+}
+
+static inline guint32 *
+alloc_code_memory (guint code_size)
+{
+ guint32 *p;
+
+ p = g_malloc(code_size);
+
+ return p;
+}
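+
+/*
+ * g_malloc returns ordinary heap memory and the buffer is later
+ * executed, so this assumes the heap is mapped executable. On a
+ * system enforcing W^X, something like
+ *   mmap (NULL, code_size, PROT_READ|PROT_WRITE|PROT_EXEC,
+ *         MAP_PRIVATE|MAP_ANONYMOUS, -1, 0)
+ * would be needed instead.
+ */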
+
+static inline guint32 *
+emit_call_and_store_retval (guint32 *p, MonoMethodSignature *sig,
+ guint stack_size, gboolean string_ctor)
+{
+ guint32 simpletype;
+
+ /* call "callme" */
+ sparc_jmpl_imm (p, sparc_i0, 0, sparc_callsite);
+ sparc_nop (p);
+#if !SPARCV9
+ if (sig->ret->type == MONO_TYPE_VALUETYPE && !sig->ret->data.klass->enumtype) {
+ int size = mono_class_native_size (sig->ret->data.klass, NULL);
+ sparc_unimp (p, size & 4095);
+ }
+#endif
+
+ /* get return value */
+ if (sig->ret->byref || string_ctor) {
+ sparc_st_ptr (p, sparc_o0, sparc_i1, 0);
+ } else {
+ simpletype = sig->ret->type;
+ enum_retval:
+ switch (simpletype) {
+ case MONO_TYPE_BOOLEAN:
+ case MONO_TYPE_I1:
+ case MONO_TYPE_U1:
+ sparc_stb (p, sparc_o0, sparc_i1, 0);
+ break;
+ case MONO_TYPE_CHAR:
+ case MONO_TYPE_I2:
+ case MONO_TYPE_U2:
+ sparc_sth (p, sparc_o0, sparc_i1, 0);
+ break;
+ case MONO_TYPE_I4:
+ case MONO_TYPE_U4:
+ sparc_st (p, sparc_o0, sparc_i1, 0);
+ break;
+ case MONO_TYPE_I:
+ case MONO_TYPE_U:
+ case MONO_TYPE_CLASS:
+ case MONO_TYPE_OBJECT:
+ case MONO_TYPE_SZARRAY:
+ case MONO_TYPE_ARRAY:
+ case MONO_TYPE_STRING:
+ case MONO_TYPE_PTR:
+ sparc_st_ptr (p, sparc_o0, sparc_i1, 0);
+ break;
+ case MONO_TYPE_R4:
+ sparc_stf (p, sparc_f0, sparc_i1, 0);
+ break;
+ case MONO_TYPE_R8:
+ sparc_stdf (p, sparc_f0, sparc_i1, 0);
+ break;
+ case MONO_TYPE_I8:
+#if SPARCV9
+ sparc_stx (p, sparc_o0, sparc_i1, 0);
+#else
+ sparc_std (p, sparc_o0, sparc_i1, 0);
+#endif
+ break;
+ case MONO_TYPE_VALUETYPE: {
+ gint size;
+ if (sig->ret->data.klass->enumtype) {
+ simpletype = sig->ret->data.klass->enum_basetype->type;
+ goto enum_retval;
+ }
+#if SPARCV9
+ size = mono_class_native_size (sig->ret->data.klass, NULL);
+ if (size <= 32) {
+ int n_regs = size / 8;
+ int j;
+ sparc_ldx_imm (p, sparc_i1, 0, sparc_i1);
+ /* wrong if there are floating values in the struct... */
+ for (j = 0; j < n_regs; j++) {
+ sparc_stx_imm (p, sparc_o0 + j, sparc_i1, j * 8);
+ }
+ size -= n_regs * 8;
+ if (size > 0) {
+ int last_reg = sparc_o0 + n_regs;
+ /* get value right aligned in register */
+ sparc_srlx_imm(p, last_reg, 64 - 8 * size, last_reg);
+ if ((size & 1) != 0) {
+ sparc_stb_imm (p, last_reg, sparc_i1, n_regs * 8 + size - 1);
+ size--;
+ if (size > 0)
+ sparc_srlx_imm(p, last_reg, 8, last_reg);
+ }
+ if ((size & 2) != 0) {
+ sparc_sth_imm (p, last_reg, sparc_i1, n_regs * 8 + size - 2);
+ size -= 2;
+ if (size > 0)
+ sparc_srlx_imm(p, last_reg, 16, last_reg);
+ }
+ if ((size & 4) != 0)
+ sparc_st_imm (p, last_reg, sparc_i1, n_regs * 8);
+ }
+ }
+#endif
+ }
+ case MONO_TYPE_VOID:
+ break;
+ default:
+ g_error ("Can't handle as return value 0x%x", sig->ret->type);
+ }
+ }
+ return p;
+}
+
+MonoPIFunc
+mono_arch_create_trampoline (MonoMethodSignature *sig, gboolean string_ctor)
+{
+ guint32 *p, *code_buffer;
+ guint stack_size, code_size, i;
+ gboolean use_memcpy = FALSE;
+ static GHashTable *cache = NULL;
+ MonoPIFunc res;
+
+ if (!cache)
+ cache = g_hash_table_new ((GHashFunc)mono_signature_hash,
+ (GCompareFunc)mono_metadata_signature_equal);
+
+ if ((res = (MonoPIFunc)g_hash_table_lookup(cache, sig)))
+ return res;
+
+ calculate_sizes (sig, &stack_size, &code_size,
+ string_ctor, &use_memcpy);
+
+ p = code_buffer = alloc_code_memory (code_size);
+ p = emit_prolog (p, sig, stack_size);
+ p = emit_save_parameters (p, sig, stack_size, use_memcpy);
+ p = emit_call_and_store_retval (p, sig, stack_size, string_ctor);
+ /* we don't return structs here so pass in NULL as signature */
+ p = emit_epilog (p, NULL, stack_size);
+
+ g_assert(p <= code_buffer + (code_size / 4));
+
+ DEBUG(sparc_disassemble_code (code_buffer, p, sig_to_name(sig, NULL)));
+
+	/* So here's the deal...
+	 * UltraSPARC will flush a whole cache line at a time
+	 * BUT, older SPARCs won't.
+	 * So, be compatible and flush a dword (8 bytes) at a time...
+	 */
+
+	for (i = 0; i < ((p - code_buffer)/2); i++)
+		flushi((code_buffer + (i*2))); /* guint32 units, so i*2 steps 8 bytes */
+
+ g_hash_table_insert(cache, sig, code_buffer);
+
+ return (MonoPIFunc)code_buffer;
+}
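+
+/*
+ * The generated stub is entered with the native function to call in
+ * %i0, a pointer for the return value in %i1, the 'this' pointer in
+ * %i2 and the stackval argument array in %i3 (ARG_BASE), matching the
+ * register use in the emitters above; that is the argument order a
+ * MonoPIFunc caller is expected to use.
+ */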
+
+#define MINV_POS (MINIMAL_STACK_SIZE * SLOT_SIZE + BIAS)
+
+void *
+mono_arch_create_method_pointer (MonoMethod *method)
+{
+ MonoMethodSignature *sig;
+ MonoJitInfo *ji;
+ guint stack_size, code_size, stackval_arg_pos, local_pos;
+ guint i, local_start, reg_param = 0, stack_param, cpos, vt_cur;
+ guint32 align = 0;
+ guint32 *p, *code_buffer;
+ gint *vtbuf;
+ gint32 simpletype;
+
+ code_size = 1024; /* these should be calculated... */
+ stack_size = 1024;
+ stack_param = 0;
+
+ sig = method->signature;
+
+ p = code_buffer = g_malloc (code_size);
+
+	DEBUG(fprintf(stderr, "Delegate [start emitting] %s\n", method->name));
+	DEBUG(fprintf(stderr, "%s\n", sig_to_name(sig, NULL)));
+
+ p = emit_prolog (p, sig, stack_size);
+
+ /* fill MonoInvocation */
+ sparc_st_imm_ptr (p, sparc_g0, sparc_sp,
+ (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, ex)));
+ sparc_st_imm_ptr (p, sparc_g0, sparc_sp,
+ (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, ex_handler)));
+ sparc_st_imm_ptr (p, sparc_g0, sparc_sp,
+ (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, parent)));
+
+ sparc_set_ptr (p, (void *)method, sparc_l0);
+ sparc_st_imm_ptr (p, sparc_l0, sparc_sp,
+ (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, method)));
+
+ stackval_arg_pos = MINV_POS + sizeof (MonoInvocation);
+ local_start = local_pos = stackval_arg_pos + (sig->param_count + 1) * sizeof (stackval);
+
+ if (sig->hasthis) {
+ sparc_st_imm_ptr (p, sparc_i0, sparc_sp,
+ (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, obj)));
+ reg_param = 1;
+ }
+
+ if (sig->param_count) {
+ gint save_count = MIN (OUT_REGS, sig->param_count + sig->hasthis);
+ for (i = reg_param; i < save_count; i++) {
+ sparc_st_imm_ptr (p, sparc_i0 + i, sparc_sp, local_pos);
+ local_pos += SLOT_SIZE;
+ }
+ }
+
+ /* prepare space for valuetypes */
+ vt_cur = local_pos;
+ vtbuf = alloca (sizeof(int)*sig->param_count);
+ cpos = 0;
+ for (i = 0; i < sig->param_count; i++) {
+ MonoType *type = sig->params [i];
+ vtbuf [i] = -1;
+ if (!sig->params[i]->byref && type->type == MONO_TYPE_VALUETYPE) {
+ MonoClass *klass = type->data.klass;
+ gint size;
+
+ if (klass->enumtype)
+ continue;
+ size = mono_class_native_size (klass, &align);
+ cpos += align - 1;
+ cpos &= ~(align - 1);
+ vtbuf [i] = cpos;
+ cpos += size;
+ }
+ }
+ cpos += SLOT_SIZE - 1;
+ cpos &= ~(SLOT_SIZE - 1);
+
+ local_pos += cpos;
+
+ /* set MonoInvocation::stack_args */
+ sparc_add_imm (p, 0, sparc_sp, stackval_arg_pos, sparc_l0);
+ sparc_st_imm_ptr (p, sparc_l0, sparc_sp,
+ (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, stack_args)));
+
+ /* add stackval arguments */
+ for (i=0; i < sig->param_count; i++) {
+ int stack_offset;
+ int type;
+ if (reg_param < OUT_REGS) {
+ stack_offset = local_start + i * SLOT_SIZE;
+ reg_param++;
+ } else {
+ stack_offset = stack_size + 8 + stack_param;
+ stack_param++;
+ }
+
+ if (!sig->params[i]->byref) {
+ type = sig->params[i]->type;
+ enum_arg:
+ switch (type) {
+ case MONO_TYPE_I8:
+ case MONO_TYPE_U8:
+ case MONO_TYPE_I:
+ case MONO_TYPE_U:
+ case MONO_TYPE_STRING:
+ case MONO_TYPE_OBJECT:
+ case MONO_TYPE_CLASS:
+ case MONO_TYPE_SZARRAY:
+ case MONO_TYPE_PTR:
+ case MONO_TYPE_R8:
+ break;
+ case MONO_TYPE_I4:
+ case MONO_TYPE_U4:
+ stack_offset += SLOT_SIZE - 4;
+ break;
+ case MONO_TYPE_CHAR:
+ case MONO_TYPE_I2:
+ case MONO_TYPE_U2:
+ stack_offset += SLOT_SIZE - 2;
+ break;
+ case MONO_TYPE_I1:
+ case MONO_TYPE_U1:
+ case MONO_TYPE_BOOLEAN:
+ stack_offset += SLOT_SIZE - 1;
+ break;
+ case MONO_TYPE_VALUETYPE:
+ if (sig->params[i]->data.klass->enumtype) {
+ type = sig->params[i]->data.klass->enum_basetype->type;
+ goto enum_arg;
+ }
+ g_assert(vtbuf[i] >= 0);
+ break;
+ default:
+				g_error ("cannot cope with delegate arg type %d", type);
+ }
+ }
+
+ sparc_add_imm (p, 0, sparc_sp, stack_offset, sparc_o2);
+
+ if (vtbuf[i] >= 0) {
+ sparc_add_imm (p, 0, sparc_sp, vt_cur, sparc_o1);
+ sparc_st_imm_ptr (p, sparc_o1, sparc_sp, stackval_arg_pos);
+ sparc_add_imm (p, 0, sparc_sp, stackval_arg_pos,
+ sparc_o1);
+ sparc_ld_imm_ptr (p, sparc_o2, 0, sparc_o2);
+ vt_cur += vtbuf[i];
+ } else {
+ sparc_add_imm (p, 0, sparc_sp, stackval_arg_pos,
+ sparc_o1);
+ }
+
+ sparc_set_ptr (p, (void *)sig->params[i], sparc_o0);
+ sparc_set (p, (guint32)sig->pinvoke, sparc_o3);
+
+ /* YOU make the CALL! */
+ sparc_set_ptr (p, (void *)stackval_from_data, sparc_l0);
+ sparc_jmpl_imm (p, sparc_l0, 0, sparc_callsite);
+ sparc_nop (p);
+ stackval_arg_pos += sizeof(stackval);
+ }
+
+ /* return value storage */
+ /* Align to dword */
+ stackval_arg_pos = (stackval_arg_pos + (8 - 1)) & (~(8 -1));
+ if (sig->param_count) {
+ sparc_add_imm (p, 0, sparc_sp, stackval_arg_pos, sparc_l0);
+ }
+ if (!sig->ret->byref && sig->ret->type == MONO_TYPE_VALUETYPE && !sig->ret->data.klass->enumtype) {
+#if !SPARCV9
+ /* pass on callers buffer */
+ sparc_ld_imm_ptr (p, sparc_fp, 64, sparc_l1);
+ sparc_st_imm_ptr (p, sparc_l1, sparc_l0, 0);
+#else
+ sparc_add_imm (p, 0, sparc_l0, sizeof(stackval), sparc_l1);
+ sparc_st_imm_ptr (p, sparc_l1, sparc_l0, 0);
+#endif
+ }
+
+ sparc_st_imm_ptr (p, sparc_l0, sparc_sp,
+ (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, retval)));
+
+ /* call ves_exec_method */
+ sparc_add_imm (p, 0, sparc_sp, MINV_POS, sparc_o0);
+ sparc_set_ptr (p, (void *)ves_exec_method, sparc_l0);
+ sparc_jmpl_imm (p, sparc_l0, 0, sparc_callsite);
+ sparc_nop (p);
+
+ /* move retval from stackval to proper place (r3/r4/...) */
+ if (sig->ret->byref) {
+ sparc_ld_imm_ptr (p, sparc_sp, stackval_arg_pos, sparc_i0 );
+ } else {
+ enum_retvalue:
+ switch (sig->ret->type) {
+ case MONO_TYPE_VOID:
+ break;
+ case MONO_TYPE_BOOLEAN:
+ case MONO_TYPE_I1:
+ case MONO_TYPE_U1:
+ case MONO_TYPE_I2:
+ case MONO_TYPE_U2:
+ case MONO_TYPE_I4:
+ case MONO_TYPE_U4:
+ sparc_ld_imm (p, sparc_sp, stackval_arg_pos, sparc_i0);
+ break;
+ case MONO_TYPE_I:
+ case MONO_TYPE_U:
+ case MONO_TYPE_OBJECT:
+ case MONO_TYPE_STRING:
+ case MONO_TYPE_CLASS:
+ sparc_ld_imm_ptr (p, sparc_sp, stackval_arg_pos, sparc_i0);
+ break;
+ case MONO_TYPE_I8:
+ case MONO_TYPE_U8:
+#if SPARCV9
+ sparc_ldx_imm (p, sparc_sp, stackval_arg_pos, sparc_i0);
+#else
+ sparc_ld_imm (p, sparc_sp, stackval_arg_pos, sparc_i0);
+ sparc_ld_imm (p, sparc_sp, stackval_arg_pos + 4, sparc_i1);
+#endif
+ break;
+ case MONO_TYPE_R4:
+ sparc_lddf_imm (p, sparc_sp, stackval_arg_pos, sparc_f0);
+ sparc_fdtos(p, sparc_f0, sparc_f0);
+ break;
+ case MONO_TYPE_R8:
+ sparc_lddf_imm (p, sparc_sp, stackval_arg_pos, sparc_f0);
+ break;
+ case MONO_TYPE_VALUETYPE: {
+ gint size;
+ gint reg = sparc_i0;
+ if (sig->ret->data.klass->enumtype) {
+ simpletype = sig->ret->data.klass->enum_basetype->type;
+ goto enum_retvalue;
+ }
+#if SPARCV9
+ size = mono_class_native_size (sig->ret->data.klass, NULL);
+ sparc_ldx_imm (p, sparc_sp, stackval_arg_pos, sparc_l0);
+ if (size <= 16) {
+ gint off = 0;
+ if (size >= 8) {
+ sparc_ldx_imm (p, sparc_l0, 0, reg);
+ size -= 8;
+ off += 8;
+ reg++;
+ }
+ if (size > 0)
+ sparc_ldx_imm (p, sparc_l0, off, reg);
+ } else
+ NOT_IMPL("value type as ret val from delegate");
+#endif
+ break;
+ }
+ default:
+ g_error ("Type 0x%x not handled yet in thunk creation",
+ sig->ret->type);
+ break;
+ }
+ }
+
+ p = emit_epilog (p, sig, stack_size);
+
+	for (i = 0; i < ((p - code_buffer)/2); i++)
+		flushi((code_buffer + (i*2))); /* guint32 units, so i*2 steps 8 bytes */
+
+ ji = g_new0 (MonoJitInfo, 1);
+ ji->method = method;
+ ji->code_size = p - code_buffer;
+ ji->code_start = code_buffer;
+
+ mono_jit_info_table_add (mono_get_root_domain (), ji);
+
+ DEBUG(sparc_disassemble_code (code_buffer, p, method->name));
+
+	DEBUG(fprintf(stderr, "Delegate [end emitting] %s\n", method->name));
+
+ return ji->code_start;
+}
diff --git a/lib/ffts/src/arch/x64/.gitignore b/lib/ffts/src/arch/x64/.gitignore
new file mode 100644
index 0000000..6930f61
--- /dev/null
+++ b/lib/ffts/src/arch/x64/.gitignore
@@ -0,0 +1,4 @@
+/Makefile.in
+/Makefile
+/.deps
+/.libs
diff --git a/lib/ffts/src/arch/x64/Makefile.am b/lib/ffts/src/arch/x64/Makefile.am
new file mode 100644
index 0000000..db9d583
--- /dev/null
+++ b/lib/ffts/src/arch/x64/Makefile.am
@@ -0,0 +1,2 @@
+EXTRA_DIST = x64-codegen.h
+
diff --git a/lib/ffts/src/arch/x64/x64-codegen.h b/lib/ffts/src/arch/x64/x64-codegen.h
new file mode 100644
index 0000000..02b9907
--- /dev/null
+++ b/lib/ffts/src/arch/x64/x64-codegen.h
@@ -0,0 +1,1938 @@
+/*
+ * x64-codegen.h: Macros for generating x86-64 code
+ *
+ * Authors:
+ * Paolo Molaro (lupus@ximian.com)
+ * Intel Corporation (ORP Project)
+ * Sergey Chaban (serge@wildwestsoftware.com)
+ * Dietmar Maurer (dietmar@ximian.com)
+ * Patrik Torstensson
+ * Zalman Stern
+ *
+ * Copyright (C) 2000 Intel Corporation. All rights reserved.
+ * Copyright (C) 2001, 2002 Ximian, Inc.
+ */
+
+#ifndef X64_H
+#define X64_H
+
+#include "../x86/x86-codegen.h"
+
+#include <stdint.h>
+
+/* x86-64 general purpose registers */
+typedef enum {
+ X64_RAX = 0,
+ X64_RCX = 1,
+ X64_RDX = 2,
+ X64_RBX = 3,
+ X64_RSP = 4,
+ X64_RBP = 5,
+ X64_RSI = 6,
+ X64_RDI = 7,
+ X64_R8 = 8,
+ X64_R9 = 9,
+ X64_R10 = 10,
+ X64_R11 = 11,
+ X64_R12 = 12,
+ X64_R13 = 13,
+ X64_R14 = 14,
+ X64_R15 = 15,
+ X64_RIP = 16,
+ X64_NREG
+} X64_Reg_No;
+
+/* x86-64 XMM registers */
+typedef enum {
+ X64_XMM0 = 0,
+ X64_XMM1 = 1,
+ X64_XMM2 = 2,
+ X64_XMM3 = 3,
+ X64_XMM4 = 4,
+ X64_XMM5 = 5,
+ X64_XMM6 = 6,
+ X64_XMM7 = 7,
+ X64_XMM8 = 8,
+ X64_XMM9 = 9,
+ X64_XMM10 = 10,
+ X64_XMM11 = 11,
+ X64_XMM12 = 12,
+ X64_XMM13 = 13,
+ X64_XMM14 = 14,
+ X64_XMM15 = 15,
+ X64_XMM_NREG = 16,
+} X64_XMM_Reg_No;
+
+typedef enum
+{
+ X64_REX_B = 1, /* The register in r/m field, base register in SIB byte, or reg in opcode is 8-15 rather than 0-7 */
+ X64_REX_X = 2, /* The index register in SIB byte is 8-15 rather than 0-7 */
+ X64_REX_R = 4, /* The reg field of ModRM byte is 8-15 rather than 0-7 */
+  X64_REX_W = 8 /* Operation is 64-bit instead of 32 (default) or 16 (with 0x66 prefix) */
+} X64_REX_Bits;
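+
+/* For example, a 64-bit register-to-register move such as
+ * x64_mov_reg_reg (p, X64_RAX, X64_RBX, 8) only needs REX.W and emits
+ * 0x48 0x8b 0xc3 (mov %rbx,%rax); using registers 8-15 additionally
+ * sets REX.R/X/B as described above. */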
+
+#if defined(__native_client_codegen__)
+
+#define x64_codegen_pre(inst) uint8_t* _codegen_start = (inst); x64_nacl_instruction_pre();
+#define x64_codegen_post(inst) (x64_nacl_instruction_post(&_codegen_start, &(inst)), _codegen_start);
+
+/* Because of rex prefixes, etc, call sequences are not constant size. */
+/* These pre- and post-sequence hooks remedy this by aligning the call */
+/* sequence after we emit it, since we will know the exact size then. */
+#define x64_call_sequence_pre(inst) uint8_t* _code_start = (inst);
+#define x64_call_sequence_post(inst) \
+ (mono_nacl_align_call(&_code_start, &(inst)), _code_start);
+
+/* Native client can load/store using one of the following registers */
+/* as a base: rip, r15, rbp, rsp. Any other base register needs to have */
+/* its upper 32 bits cleared and reference memory using r15 as the base. */
+#define x64_is_valid_nacl_base(reg) \
+ ((reg) == X64_RIP || (reg) == X64_R15 || \
+ (reg) == X64_RBP || (reg) == X64_RSP)
+#else
+
+#define x64_codegen_pre(inst)
+#define x64_codegen_post(inst)
+
+#endif /* __native_client_codegen__ */
+
+#ifdef TARGET_WIN32
+#define X64_ARG_REG1 X64_RCX
+#define X64_ARG_REG2 X64_RDX
+#define X64_ARG_REG3 X64_R8
+#define X64_ARG_REG4 X64_R9
+#else
+#define X64_ARG_REG1 X64_RDI
+#define X64_ARG_REG2 X64_RSI
+#define X64_ARG_REG3 X64_RDX
+#define X64_ARG_REG4 X64_RCX
+#endif
+
+#ifdef TARGET_WIN32
+#define X64_CALLEE_REGS ((1 << X64_RAX) | (1 << X64_RCX) | (1 << X64_RDX) | (1 << X64_R8) | (1 << X64_R9) | (1 << X64_R10))
+#define X64_IS_CALLEE_REG(reg) (X64_CALLEE_REGS & (1 << (reg)))
+
+#define X64_ARGUMENT_REGS ((1 << X64_RDX) | (1 << X64_RCX) | (1 << X64_R8) | (1 << X64_R9))
+#define X64_IS_ARGUMENT_REG(reg) (X64_ARGUMENT_REGS & (1 << (reg)))
+
+#define X64_CALLEE_SAVED_REGS ((1 << X64_RDI) | (1 << X64_RSI) | (1 << X64_RBX) | (1 << X64_R12) | (1 << X64_R13) | (1 << X64_R14) | (1 << X64_R15) | (1 << X64_RBP))
+#define X64_IS_CALLEE_SAVED_REG(reg) (X64_CALLEE_SAVED_REGS & (1 << (reg)))
+#elif defined(__native_client_codegen__)
+/* x64 Native Client code may not write R15 */
+#define X64_CALLEE_REGS ((1 << X64_RAX) | (1 << X64_RCX) | (1 << X64_RDX) | (1 << X64_RSI) | (1 << X64_RDI) | (1 << X64_R8) | (1 << X64_R9) | (1 << X64_R10))
+#define X64_IS_CALLEE_REG(reg) (X64_CALLEE_REGS & (1 << (reg)))
+
+#define X64_ARGUMENT_REGS ((1 << X64_RDI) | (1 << X64_RSI) | (1 << X64_RDX) | (1 << X64_RCX) | (1 << X64_R8) | (1 << X64_R9))
+#define X64_IS_ARGUMENT_REG(reg) (X64_ARGUMENT_REGS & (1 << (reg)))
+
+#define X64_CALLEE_SAVED_REGS ((1 << X64_RBX) | (1 << X64_R12) | (1 << X64_R13) | (1 << X64_R14) | (1 << X64_RBP))
+#define X64_IS_CALLEE_SAVED_REG(reg) (X64_CALLEE_SAVED_REGS & (1 << (reg)))
+#else
+#define X64_CALLEE_REGS ((1 << X64_RAX) | (1 << X64_RCX) | (1 << X64_RDX) | (1 << X64_RSI) | (1 << X64_RDI) | (1 << X64_R8) | (1 << X64_R9) | (1 << X64_R10))
+#define X64_IS_CALLEE_REG(reg) (X64_CALLEE_REGS & (1 << (reg)))
+
+#define X64_ARGUMENT_REGS ((1 << X64_RDI) | (1 << X64_RSI) | (1 << X64_RDX) | (1 << X64_RCX) | (1 << X64_R8) | (1 << X64_R9))
+#define X64_IS_ARGUMENT_REG(reg) (X64_ARGUMENT_REGS & (1 << (reg)))
+
+#define X64_CALLEE_SAVED_REGS ((1 << X64_RBX) | (1 << X64_R12) | (1 << X64_R13) | (1 << X64_R14) | (1 << X64_R15) | (1 << X64_RBP))
+#define X64_IS_CALLEE_SAVED_REG(reg) (X64_CALLEE_SAVED_REGS & (1 << (reg)))
+#endif
+
+#define X64_REX(bits) ((unsigned char)(0x40 | (bits)))
+
+#if defined(__native_client_codegen__)
+#define x64_emit_rex(inst, width, reg_modrm, reg_index, reg_rm_base_opcode) do \
+ { \
+ unsigned char _x64_rex_bits = \
+ (((width) > 4) ? X64_REX_W : 0) | \
+ (((reg_modrm) > 7) ? X64_REX_R : 0) | \
+ (((reg_index) > 7) ? X64_REX_X : 0) | \
+ (((reg_rm_base_opcode) > 7) ? X64_REX_B : 0); \
+ x64_nacl_tag_rex((inst)); \
+ if ((_x64_rex_bits != 0) || (((width) == 1))) *(inst)++ = X64_REX(_x64_rex_bits); \
+ } while (0)
+#else
+#define x64_emit_rex(inst, width, reg_modrm, reg_index, reg_rm_base_opcode) do \
+ { \
+ unsigned char _x64_rex_bits = \
+ (((width) > 4) ? X64_REX_W : 0) | \
+ (((reg_modrm) > 7) ? X64_REX_R : 0) | \
+ (((reg_index) > 7) ? X64_REX_X : 0) | \
+ (((reg_rm_base_opcode) > 7) ? X64_REX_B : 0); \
+ if ((_x64_rex_bits != 0) || (((width) == 1))) *(inst)++ = X64_REX(_x64_rex_bits); \
+ } while (0)
+#endif /* __native_client_codegen__ */
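+
+/* A REX prefix is also forced for 1-byte operands even when no REX bit
+ * is set: with any REX present, r/m encodings 4-7 select
+ * %spl/%bpl/%sil/%dil rather than %ah/%ch/%dh/%bh, which is what the
+ * X86_IS_BYTE_REG override below relies on. */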
+
+typedef union {
+ uint64_t val;
+ unsigned char b[8];
+} x64_imm_buf;
+
+/* In 64 bit mode, all registers have a low byte subregister */
+#undef X86_IS_BYTE_REG
+#define X86_IS_BYTE_REG(reg) 1
+
+#define x64_modrm_mod(modrm) ((modrm) >> 6)
+#define x64_modrm_reg(modrm) (((modrm) >> 3) & 0x7)
+#define x64_modrm_rm(modrm) ((modrm) & 0x7)
+
+#define x64_rex_r(rex) ((((rex) >> 2) & 0x1) << 3)
+#define x64_rex_x(rex) ((((rex) >> 1) & 0x1) << 3)
+#define x64_rex_b(rex) ((((rex) >> 0) & 0x1) << 3)
+
+#define x64_sib_scale(sib) ((sib) >> 6)
+#define x64_sib_index(sib) (((sib) >> 3) & 0x7)
+#define x64_sib_base(sib) ((sib) & 0x7)
+
+#define x64_is_imm32(val) ((int64_t)val >= -((int64_t)1<<31) && (int64_t)val <= (((int64_t)1<<31)-1))
+
+#define x86_imm_emit64(inst,imm) \
+ do { \
+ x64_imm_buf imb; \
+ imb.val = (uint64_t) (imm); \
+ *(inst)++ = imb.b [0]; \
+ *(inst)++ = imb.b [1]; \
+ *(inst)++ = imb.b [2]; \
+ *(inst)++ = imb.b [3]; \
+ *(inst)++ = imb.b [4]; \
+ *(inst)++ = imb.b [5]; \
+ *(inst)++ = imb.b [6]; \
+ *(inst)++ = imb.b [7]; \
+ } while (0)
+
+#define x64_membase_emit(inst,reg,basereg,disp) do { \
+ if ((basereg) == X64_RIP) { \
+ x86_address_byte ((inst), 0, (reg)&0x7, 5); \
+ x86_imm_emit32 ((inst), (disp)); \
+ } \
+ else \
+ x86_membase_emit ((inst),(reg)&0x7, (basereg)&0x7, (disp)); \
+} while (0)
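+
+/* In 64-bit mode the mod=00, r/m=101 encoding means disp32(%rip)
+ * rather than an absolute disp32, hence the X64_RIP special case
+ * above; absolute 32-bit addresses instead need the SIB form used by
+ * x64_mov_reg_mem_body further down. */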
+
+#define x64_memindex_emit(inst, reg, basereg, disp, indexreg, shift) \
+ x86_memindex_emit((inst), ((reg) & 0x7), ((basereg) & 0x7), (disp), ((indexreg) & 0x7), (shift))
+
+#define x64_alu_reg_imm_size_body(inst,opc,reg,imm,size) \
+ do { \
+ if (x86_is_imm8((imm))) { \
+ x64_emit_rex(inst, size, 0, 0, (reg)); \
+ *(inst)++ = (unsigned char)0x83; \
+ x86_reg_emit ((inst), (opc), (reg)); \
+ x86_imm_emit8 ((inst), (imm)); \
+ } else if ((reg) == X64_RAX) { \
+ x64_emit_rex(inst, size, 0, 0, 0); \
+ *(inst)++ = (((unsigned char)(opc)) << 3) + 5; \
+ x86_imm_emit32 ((inst), (imm)); \
+ } else { \
+ x64_emit_rex(inst, size, 0, 0, (reg)); \
+ *(inst)++ = (unsigned char)0x81; \
+ x86_reg_emit ((inst), (opc), (reg)); \
+ x86_imm_emit32 ((inst), (imm)); \
+ } \
+ } while (0)
+
+#define x64_alu_reg_reg_size_body(inst,opc,dreg,reg,size) \
+ do { \
+ x64_emit_rex(inst, size, (dreg), 0, (reg)); \
+ *(inst)++ = (((unsigned char)(opc)) << 3) + 3; \
+ x86_reg_emit ((inst), (dreg), (reg)); \
+ } while (0)
+
+#if defined(__native_client_codegen__)
+/* NaCl modules may not directly update RSP or RBP other than direct copies */
+/* between them. Instead the lower 4 bytes are updated and then added to R15 */
+#define x64_is_nacl_stack_reg(reg) (((reg) == X64_RSP) || ((reg) == X64_RBP))
+
+#define x64_alu_reg_imm_size(inst,opc,reg,imm,size) \
+ do{ \
+ x64_codegen_pre(inst); \
+ if (x64_is_nacl_stack_reg(reg)) { \
+ if (((opc) != X86_ADD) && ((opc) != X86_SUB)) \
+ g_assert_not_reached(); \
+ x64_alu_reg_imm_size_body((inst), (opc), (reg), (imm), 4); \
+ /* Use LEA instead of ADD to preserve flags */ \
+ x64_lea_memindex_size((inst), (reg), (reg), 0, X64_R15, 0, 8); \
+ } else { \
+ x64_alu_reg_imm_size_body((inst), (opc), (reg), (imm), (size)); \
+ } \
+ x64_codegen_post(inst); \
+ } while(0)
+
+#define x64_alu_reg_reg_size(inst,opc,dreg,reg,size) \
+ do { \
+ x64_codegen_pre(inst); \
+ if (x64_is_nacl_stack_reg((dreg)) && ((reg) != X64_R15)) { \
+ if (((opc) != X86_ADD && (opc) != X86_SUB)) \
+ g_assert_not_reached(); \
+ x64_alu_reg_reg_size_body((inst), (opc), (dreg), (reg), 4); \
+ /* Use LEA instead of ADD to preserve flags */ \
+ x64_lea_memindex_size((inst), (dreg), (dreg), 0, X64_R15, 0, 8); \
+ } else { \
+ x64_alu_reg_reg_size_body((inst), (opc), (dreg), (reg), (size)); \
+ } \
+ x64_codegen_post(inst); \
+ } while (0)
+
+#else
+
+#define x64_alu_reg_imm_size(inst,opc,reg,imm,size) \
+ x64_alu_reg_imm_size_body((inst), (opc), (reg), (imm), (size))
+
+#define x64_alu_reg_reg_size(inst,opc,dreg,reg,size) \
+ x64_alu_reg_reg_size_body((inst), (opc), (dreg), (reg), (size))
+
+#endif /*__native_client_codegen__*/
+
+#define x64_alu_reg_imm(inst,opc,reg,imm) x64_alu_reg_imm_size((inst),(opc),(reg),(imm),8)
+
+#define x64_alu_reg_reg(inst,opc,dreg,reg) x64_alu_reg_reg_size ((inst),(opc),(dreg),(reg),8)
+
+#define x64_alu_reg_membase_size(inst,opc,reg,basereg,disp,size) \
+ do { \
+ x64_codegen_pre(inst); \
+ x64_emit_rex ((inst),(size),(reg),0,(basereg)); \
+ *(inst)++ = (((unsigned char)(opc)) << 3) + 3; \
+ x64_membase_emit (inst, reg, basereg, disp); \
+ x64_codegen_post(inst); \
+} while (0)
+
+#define x64_mov_regp_reg(inst,regp,reg,size) \
+ do { \
+ x64_codegen_pre(inst); \
+ if ((size) == 2) \
+ x86_prefix((inst), X86_OPERAND_PREFIX); \
+ x64_emit_rex(inst, (size), (reg), 0, (regp)); \
+ switch ((size)) { \
+ case 1: *(inst)++ = (unsigned char)0x88; break; \
+ case 2: case 4: case 8: *(inst)++ = (unsigned char)0x89; break; \
+ default: assert (0); \
+ } \
+ x86_regp_emit ((inst), (reg), (regp)); \
+ x64_codegen_post(inst); \
+ } while (0)
+
+#define x64_mov_membase_reg(inst,basereg,disp,reg,size) \
+ do { \
+ x64_codegen_pre(inst); \
+ if ((size) == 2) \
+ x86_prefix((inst), X86_OPERAND_PREFIX); \
+ x64_emit_rex(inst, (size), (reg), 0, (basereg)); \
+ switch ((size)) { \
+ case 1: *(inst)++ = (unsigned char)0x88; break; \
+ case 2: case 4: case 8: *(inst)++ = (unsigned char)0x89; break; \
+ default: assert (0); \
+ } \
+ x86_membase_emit ((inst), ((reg)&0x7), ((basereg)&0x7), (disp)); \
+ x64_codegen_post(inst); \
+ } while (0)
+
+#define x64_mov_mem_reg(inst,mem,reg,size) \
+ do { \
+ x64_codegen_pre(inst); \
+ if ((size) == 2) \
+ x86_prefix((inst), X86_OPERAND_PREFIX); \
+ x64_emit_rex(inst, (size), (reg), 0, 0); \
+ switch ((size)) { \
+ case 1: *(inst)++ = (unsigned char)0x88; break; \
+ case 2: case 4: case 8: *(inst)++ = (unsigned char)0x89; break; \
+ default: assert (0); \
+ } \
+ x86_address_byte ((inst), 0, (reg), 4); \
+ x86_address_byte ((inst), 0, 4, 5); \
+ x86_imm_emit32 ((inst), (mem)); \
+ x64_codegen_post(inst); \
+ } while (0)
+
+#define x64_mov_reg_reg(inst,dreg,reg,size) \
+ do { \
+ x64_codegen_pre(inst); \
+ if ((size) == 2) \
+ x86_prefix((inst), X86_OPERAND_PREFIX); \
+ x64_emit_rex(inst, (size), (dreg), 0, (reg)); \
+ switch ((size)) { \
+ case 1: *(inst)++ = (unsigned char)0x8a; break; \
+ case 2: case 4: case 8: *(inst)++ = (unsigned char)0x8b; break; \
+ default: assert (0); \
+ } \
+ x86_reg_emit ((inst), (dreg), (reg)); \
+ x64_codegen_post(inst); \
+ } while (0)
+
+#define x64_mov_reg_mem_body(inst,reg,mem,size) \
+ do { \
+ x64_codegen_pre(inst); \
+ if ((size) == 2) \
+ x86_prefix((inst), X86_OPERAND_PREFIX); \
+ x64_emit_rex(inst, (size), (reg), 0, 0); \
+ switch ((size)) { \
+ case 1: *(inst)++ = (unsigned char)0x8a; break; \
+ case 2: case 4: case 8: *(inst)++ = (unsigned char)0x8b; break; \
+ default: assert (0); \
+ } \
+ x86_address_byte ((inst), 0, (reg), 4); \
+ x86_address_byte ((inst), 0, 4, 5); \
+ x86_imm_emit32 ((inst), (mem)); \
+ x64_codegen_post(inst); \
+ } while (0)
+
+#if defined(__native_client_codegen__)
+/* We have to re-base memory reads because memory isn't zero based. */
+#define x64_mov_reg_mem(inst,reg,mem,size) \
+ do { \
+ x64_mov_reg_membase((inst),(reg),X64_R15,(mem),(size)); \
+ } while (0)
+#else
+#define x64_mov_reg_mem(inst,reg,mem,size) \
+ do { \
+ x64_mov_reg_mem_body((inst),(reg),(mem),(size)); \
+ } while (0)
+#endif /* __native_client_codegen__ */
+
+#define x64_mov_reg_membase_body(inst,reg,basereg,disp,size) \
+ do { \
+ if ((size) == 2) \
+ x86_prefix((inst), X86_OPERAND_PREFIX); \
+ x64_emit_rex(inst, (size), (reg), 0, (basereg)); \
+ switch ((size)) { \
+ case 1: *(inst)++ = (unsigned char)0x8a; break; \
+ case 2: case 4: case 8: *(inst)++ = (unsigned char)0x8b; break; \
+ default: assert (0); \
+ } \
+ x64_membase_emit ((inst), (reg), (basereg), (disp)); \
+ } while (0)
+
+#define x64_mov_reg_memindex_size_body(inst,reg,basereg,disp,indexreg,shift,size) \
+ do { \
+ x64_emit_rex ((inst),(size),(reg),(indexreg),(basereg)); \
+ x86_mov_reg_memindex((inst),((reg)&0x7),((basereg)&0x7),(disp),((indexreg)&0x7),(shift),(size) == 8 ? 4 : (size)); \
+ } while (0)
+
+#if defined(__native_client_codegen__)
+
+#define x64_mov_reg_memindex_size(inst,reg,basereg,disp,indexreg,shift,size) \
+ do { \
+ x64_codegen_pre(inst); \
+ if (x64_is_nacl_stack_reg((reg))) { \
+ /* Clear upper 32 bits with mov of size 4 */ \
+ x64_mov_reg_memindex_size_body((inst), (reg), (basereg), (disp), (indexreg), (shift), 4); \
+ /* Add %r15 using LEA to preserve flags */ \
+ x64_lea_memindex_size((inst), (reg), (reg), 0, X64_R15, 0, 8); \
+ } else { \
+ x64_mov_reg_memindex_size_body((inst), (reg), (basereg), (disp), (indexreg), (shift), (size)); \
+ } \
+ x64_codegen_post(inst); \
+ } while(0)
+
+#define x64_mov_reg_membase(inst,reg,basereg,disp,size) \
+ do { \
+ x64_codegen_pre(inst); \
+ if (x64_is_nacl_stack_reg((reg))) { \
+ /* Clear upper 32 bits with mov of size 4 */ \
+ x64_mov_reg_membase_body((inst), (reg), (basereg), (disp), 4); \
+ /* Add %r15 */ \
+ x64_lea_memindex_size((inst), (reg), (reg), 0, X64_R15, 0, 8); \
+ } else { \
+ x64_mov_reg_membase_body((inst), (reg), (basereg), (disp), (size)); \
+ } \
+ x64_codegen_post(inst); \
+ } while (0)
+
+#else
+
+#define x64_mov_reg_memindex_size(inst,reg,basereg,disp,indexreg,shift,size) \
+ x64_mov_reg_memindex_size_body((inst),(reg),(basereg),(disp),(indexreg),(shift),(size))
+#define x64_mov_reg_membase(inst,reg,basereg,disp,size) \
+ do { \
+ x64_mov_reg_membase_body((inst), (reg), (basereg), (disp), (size)); \
+ } while (0)
+
+#endif /*__native_client_codegen__*/
+
+#define x64_movzx_reg_membase(inst,reg,basereg,disp,size) \
+ do { \
+ x64_codegen_pre(inst); \
+ x64_emit_rex(inst, (size), (reg), 0, (basereg)); \
+ switch ((size)) { \
+ case 1: *(inst)++ = (unsigned char)0x0f; *(inst)++ = (unsigned char)0xb6; break; \
+ case 2: *(inst)++ = (unsigned char)0x0f; *(inst)++ = (unsigned char)0xb7; break; \
+ case 4: case 8: *(inst)++ = (unsigned char)0x8b; break; \
+ default: assert (0); \
+ } \
+ x86_membase_emit ((inst), ((reg)&0x7), ((basereg)&0x7), (disp)); \
+ x64_codegen_post(inst); \
+ } while (0)
+
+#define x64_movsxd_reg_mem(inst,reg,mem) \
+ do { \
+ x64_codegen_pre(inst); \
+ x64_emit_rex(inst,8,(reg),0,0); \
+ *(inst)++ = (unsigned char)0x63; \
+ x86_mem_emit ((inst), ((reg)&0x7), (mem)); \
+ x64_codegen_post(inst); \
+ } while (0)
+
+#define x64_movsxd_reg_membase(inst,reg,basereg,disp) \
+ do { \
+ x64_codegen_pre(inst); \
+ x64_emit_rex(inst,8,(reg),0,(basereg)); \
+ *(inst)++ = (unsigned char)0x63; \
+ x86_membase_emit ((inst), ((reg)&0x7), ((basereg)&0x7), (disp)); \
+ x64_codegen_post(inst); \
+ } while (0)
+
+#define x64_movsxd_reg_memindex(inst, reg, basereg, disp, indexreg, shift) \
+ do { \
+ x64_codegen_pre(inst); \
+ x64_emit_rex(inst,8,(reg),0,(basereg)); \
+ *(inst)++ = (unsigned char)0x63; \
+ x64_memindex_emit((inst), (reg), (basereg), (disp), (indexreg), (shift)); \
+ x64_codegen_post(inst); \
+ } while (0)
+
+#define x64_movsxd_reg_reg(inst,dreg,reg) \
+ do { \
+ x64_codegen_pre(inst); \
+ x64_emit_rex(inst,8,(dreg),0,(reg)); \
+ *(inst)++ = (unsigned char)0x63; \
+ x86_reg_emit ((inst), (dreg), (reg)); \
+ x64_codegen_post(inst); \
+ } while (0)
+
+/* Pretty much the only instruction that supports a 64-bit immediate. Optimize for common case of
+ * 32-bit immediate. Pepper with casts to avoid warnings.
+ */
+#define x64_mov_reg_imm_size(inst,reg,imm,size) \
+ do { \
+ x64_codegen_pre(inst); \
+ x64_emit_rex(inst, (size), 0, 0, (reg)); \
+ *(inst)++ = (unsigned char)0xb8 + ((reg) & 0x7); \
+ if ((size) == 8) \
+ x86_imm_emit64 ((inst), (uint64_t)(imm)); \
+ else \
+ x86_imm_emit32 ((inst), (int)(uint64_t)(imm)); \
+ x64_codegen_post(inst); \
+ } while (0)
+
+#define x64_mov_reg_imm(inst,reg,imm) \
+ do { \
+ int _x64_width_temp = ((uint64_t)(imm) == (uint64_t)(int)(uint64_t)(imm)); \
+ x64_codegen_pre(inst); \
+ x64_mov_reg_imm_size ((inst), (reg), (imm), (_x64_width_temp ? 4 : 8)); \
+ x64_codegen_post(inst); \
+ } while (0)
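+
+/* For example, x64_mov_reg_imm (p, X64_RAX, 1) notices the immediate
+ * fits in 32 bits and emits the 5-byte 0xb8 0x01 0x00 0x00 0x00
+ * (mov $1,%eax), relying on 32-bit writes zero-extending to 64 bits;
+ * only genuinely 64-bit values get the 10-byte movabs form. */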
+
+#define x64_set_reg_template(inst,reg) x64_mov_reg_imm_size ((inst),(reg), 0, 8)
+
+#define x64_set_template(inst,reg) x64_set_reg_template((inst),(reg))
+
+#define x64_mov_membase_imm(inst,basereg,disp,imm,size) \
+ do { \
+ x64_codegen_pre(inst); \
+ if ((size) == 2) \
+ x86_prefix((inst), X86_OPERAND_PREFIX); \
+ x64_emit_rex(inst, (size) == 1 ? 0 : (size), 0, 0, (basereg)); \
+ if ((size) == 1) { \
+ *(inst)++ = (unsigned char)0xc6; \
+ x86_membase_emit ((inst), 0, (basereg) & 0x7, (disp)); \
+ x86_imm_emit8 ((inst), (imm)); \
+ } else if ((size) == 2) { \
+ *(inst)++ = (unsigned char)0xc7; \
+ x86_membase_emit ((inst), 0, (basereg) & 0x7, (disp)); \
+ x86_imm_emit16 ((inst), (imm)); \
+ } else { \
+ *(inst)++ = (unsigned char)0xc7; \
+ x86_membase_emit ((inst), 0, (basereg) & 0x7, (disp)); \
+ x86_imm_emit32 ((inst), (imm)); \
+ } \
+ x64_codegen_post(inst); \
+ } while (0)
+
+
+#define x64_lea_membase_body(inst,reg,basereg,disp) \
+ do { \
+ x64_emit_rex(inst, 8, (reg), 0, (basereg)); \
+ *(inst)++ = (unsigned char)0x8d; \
+ x64_membase_emit ((inst), (reg), (basereg), (disp)); \
+ } while (0)
+
+#if defined(__native_client_codegen__)
+/* NaCl modules may not write directly into RSP/RBP. Instead, use a */
+/* 32-bit LEA and add R15 to the effective address */
+#define x64_lea_membase(inst,reg,basereg,disp) \
+ do { \
+ x64_codegen_pre(inst); \
+ if (x64_is_nacl_stack_reg(reg)) { \
+ /* 32-bit LEA */ \
+ x64_emit_rex((inst), 4, (reg), 0, (basereg)); \
+ *(inst)++ = (unsigned char)0x8d; \
+ x64_membase_emit((inst), (reg), (basereg), (disp)); \
+ /* Use a 64-bit LEA instead of an ADD to preserve flags */ \
+ x64_lea_memindex_size((inst), (reg), (reg), 0, X64_R15, 0, 8); \
+ } else { \
+ x64_lea_membase_body((inst), (reg), (basereg), (disp)); \
+ } \
+ x64_codegen_post(inst); \
+ } while (0)
+#else
+#define x64_lea_membase(inst,reg,basereg,disp) \
+ x64_lea_membase_body((inst), (reg), (basereg), (disp))
+#endif /*__native_client_codegen__*/
+
+/* The instruction is implicitly 64-bit, so don't generate REX just for the size. */
+#define x64_push_reg(inst,reg) \
+ do { \
+ x64_codegen_pre(inst); \
+ x64_emit_rex(inst, 0, 0, 0, (reg)); \
+ *(inst)++ = (unsigned char)0x50 + ((reg) & 0x7); \
+ x64_codegen_post(inst); \
+ } while (0)
+
+/* The instruction is implicitly 64-bit, so don't generate REX just for the size. */
+#define x64_push_membase(inst,basereg,disp) \
+ do { \
+ x64_codegen_pre(inst); \
+ x64_emit_rex(inst, 0, 0, 0, (basereg)); \
+ *(inst)++ = (unsigned char)0xff; \
+ x86_membase_emit ((inst), 6, (basereg) & 0x7, (disp)); \
+ x64_codegen_post(inst); \
+ } while (0)
+
+#define x64_pop_reg_body(inst,reg) \
+ do { \
+ x64_codegen_pre(inst); \
+ x64_emit_rex(inst, 0, 0, 0, (reg)); \
+ *(inst)++ = (unsigned char)0x58 + ((reg) & 0x7); \
+ x64_codegen_post(inst); \
+ } while (0)
+
+#if defined(__native_client_codegen__)
+
+/* Size is ignored for Native Client jumps; we restrict jumping to 32 bits */
+#define x64_jump_reg_size(inst,reg,size) \
+ do { \
+ x64_codegen_pre((inst)); \
+ x64_alu_reg_imm_size((inst), X86_AND, (reg), (nacl_align_byte), 4); \
+ x64_alu_reg_reg_size((inst), X86_ADD, (reg), X64_R15, 8); \
+ x64_emit_rex ((inst),0,0,0,(reg)); \
+ x86_jump_reg((inst),((reg)&0x7)); \
+ x64_codegen_post((inst)); \
+ } while (0)
+
+/* Size is ignored for Native Client jumps; we restrict jumping to 32 bits */
+#define x64_jump_mem_size(inst,mem,size) \
+ do { \
+ x64_codegen_pre((inst)); \
+ x64_mov_reg_mem((inst), (mem), X64_R11, 4); \
+ x64_jump_reg_size((inst), X64_R11, 4); \
+ x64_codegen_post((inst)); \
+ } while (0)
+
+#define x64_call_reg_internal(inst,reg) \
+ do { \
+ x64_codegen_pre((inst)); \
+ x64_alu_reg_imm_size((inst), X86_AND, (reg), (nacl_align_byte), 4); \
+ x64_alu_reg_reg_size((inst), X86_ADD, (reg), X64_R15, 8); \
+ x64_emit_rex((inst), 0, 0, 0, (reg)); \
+ x86_call_reg((inst), ((reg) & 0x7)); \
+ x64_codegen_post((inst)); \
+ } while (0)
+
+#define x64_call_reg(inst,reg) \
+ do { \
+ x64_codegen_pre((inst)); \
+ x64_call_sequence_pre(inst); \
+ x64_call_reg_internal((inst), (reg)); \
+ x64_call_sequence_post(inst); \
+ x64_codegen_post((inst)); \
+ } while (0)
+
+
+#define x64_ret(inst) \
+ do { \
+ x64_codegen_pre(inst); \
+ x64_pop_reg_body((inst), X64_R11); \
+ x64_jump_reg_size((inst), X64_R11, 8); \
+ x64_codegen_post(inst); \
+ } while (0)
+
+#define x64_leave(inst) \
+ do { \
+ x64_codegen_pre(inst); \
+ x64_mov_reg_reg((inst), X64_RSP, X64_RBP, 8); \
+ x64_pop_reg_body((inst), X64_R11); \
+ x64_mov_reg_reg_size((inst), X64_RBP, X64_R11, 4); \
+ x64_alu_reg_reg_size((inst), X86_ADD, X64_RBP, X64_R15, 8); \
+ x64_codegen_post(inst); \
+ } while (0)
+
+#define x64_pop_reg(inst,reg) \
+ do { \
+ x64_codegen_pre(inst); \
+ if (x64_is_nacl_stack_reg((reg))) { \
+ x64_pop_reg_body((inst), X64_R11); \
+ x64_mov_reg_reg_size((inst), (reg), X64_R11, 4); \
+ x64_alu_reg_reg_size((inst), X86_ADD, (reg), X64_R15, 8); \
+ } else { \
+ x64_pop_reg_body((inst), (reg)); \
+ } \
+ x64_codegen_post(inst); \
+ } while (0)
+
+#else
+
+#define x64_call_reg(inst,reg) \
+ do { \
+ x64_emit_rex(inst, 0, 0, 0, (reg)); \
+ *(inst)++ = (unsigned char)0xff; \
+ x86_reg_emit ((inst), 2, ((reg) & 0x7)); \
+ } while (0)
+
+
+#define x64_ret(inst) do { *(inst)++ = (unsigned char)0xc3; } while (0)
+#define x64_leave(inst) do { *(inst)++ = (unsigned char)0xc9; } while (0)
+
+#define x64_pop_reg(inst,reg) x64_pop_reg_body((inst), (reg))
+
+#endif /*__native_client_codegen__*/
+
+#define x64_movsd_reg_regp(inst,reg,regp) \
+ do { \
+ x64_codegen_pre(inst); \
+ x86_prefix((inst), 0xf2); \
+ x64_emit_rex(inst, 0, (reg), 0, (regp)); \
+ *(inst)++ = (unsigned char)0x0f; \
+ *(inst)++ = (unsigned char)0x10; \
+ x86_regp_emit ((inst), (reg) & 0x7, (regp) & 0x7); \
+ x64_codegen_post(inst); \
+ } while (0)
+
+#define x64_movsd_regp_reg(inst,regp,reg) \
+ do { \
+ x64_codegen_pre(inst); \
+ x86_prefix((inst), 0xf2); \
+ x64_emit_rex(inst, 0, (reg), 0, (regp)); \
+ *(inst)++ = (unsigned char)0x0f; \
+ *(inst)++ = (unsigned char)0x11; \
+ x86_regp_emit ((inst), (reg) & 0x7, (regp) & 0x7); \
+ x64_codegen_post(inst); \
+ } while (0)
+
+#define x64_movss_reg_regp(inst,reg,regp) \
+ do { \
+ x64_codegen_pre(inst); \
+ x86_prefix((inst), 0xf3); \
+ x64_emit_rex(inst, 0, (reg), 0, (regp)); \
+ *(inst)++ = (unsigned char)0x0f; \
+ *(inst)++ = (unsigned char)0x10; \
+ x86_regp_emit ((inst), (reg) & 0x7, (regp) & 0x7); \
+ x64_codegen_post(inst); \
+ } while (0)
+
+#define x64_movss_regp_reg(inst,regp,reg) \
+ do { \
+ x64_codegen_pre(inst); \
+ x86_prefix((inst), 0xf3); \
+ x64_emit_rex(inst, 0, (reg), 0, (regp)); \
+ *(inst)++ = (unsigned char)0x0f; \
+ *(inst)++ = (unsigned char)0x11; \
+ x86_regp_emit ((inst), (reg) & 0x7, (regp) & 0x7); \
+ x64_codegen_post(inst); \
+ } while (0)
+
+#define x64_movsd_reg_membase(inst,reg,basereg,disp) \
+ do { \
+ x64_codegen_pre(inst); \
+ x86_prefix((inst), 0xf2); \
+ x64_emit_rex(inst, 0, (reg), 0, (basereg)); \
+ *(inst)++ = (unsigned char)0x0f; \
+ *(inst)++ = (unsigned char)0x10; \
+ x86_membase_emit ((inst), (reg) & 0x7, (basereg) & 0x7, (disp)); \
+ x64_codegen_post(inst); \
+ } while (0)
+
+#define x64_movss_reg_membase(inst,reg,basereg,disp) \
+ do { \
+ x64_codegen_pre(inst); \
+ x86_prefix((inst), 0xf3); \
+ x64_emit_rex(inst, 0, (reg), 0, (basereg)); \
+ *(inst)++ = (unsigned char)0x0f; \
+ *(inst)++ = (unsigned char)0x10; \
+ x86_membase_emit ((inst), (reg) & 0x7, (basereg) & 0x7, (disp)); \
+ x64_codegen_post(inst); \
+ } while (0)
+
+#define x64_movsd_membase_reg(inst,basereg,disp,reg) \
+ do { \
+ x64_codegen_pre(inst); \
+ x86_prefix((inst), 0xf2); \
+ x64_emit_rex(inst, 0, (reg), 0, (basereg)); \
+ *(inst)++ = (unsigned char)0x0f; \
+ *(inst)++ = (unsigned char)0x11; \
+ x86_membase_emit ((inst), (reg) & 0x7, (basereg) & 0x7, (disp)); \
+ x64_codegen_post(inst); \
+ } while (0)
+
+#define x64_movss_membase_reg(inst,basereg,disp,reg) \
+ do { \
+ x64_codegen_pre(inst); \
+ x86_prefix((inst), 0xf3); \
+ x64_emit_rex(inst, 0, (reg), 0, (basereg)); \
+ *(inst)++ = (unsigned char)0x0f; \
+ *(inst)++ = (unsigned char)0x11; \
+ x86_membase_emit ((inst), (reg) & 0x7, (basereg) & 0x7, (disp)); \
+ x64_codegen_post(inst); \
+ } while (0)
+
+/* The one-byte inc/dec opcodes (0x40-0x4f) are repurposed as REX prefixes in 64-bit mode */
+#define x64_inc_reg_size(inst,reg,size) \
+ do { \
+ x64_codegen_pre(inst); \
+ x64_emit_rex ((inst),(size),0,0,(reg)); \
+ *(inst)++ = (unsigned char)0xff; \
+ x86_reg_emit ((inst),0,(reg) & 0x7); \
+ x64_codegen_post(inst); \
+ } while (0)
+
+#define x64_dec_reg_size(inst,reg,size) \
+ do { \
+ x64_codegen_pre(inst); \
+ x64_emit_rex ((inst),(size),0,0,(reg)); \
+ *(inst)++ = (unsigned char)0xff; \
+ x86_reg_emit ((inst),1,(reg) & 0x7); \
+ x64_codegen_post(inst); \
+ } while (0)
+
+#define x64_fld_membase_size(inst,basereg,disp,is_double,size) do { \
+ x64_codegen_pre(inst); \
+ x64_emit_rex ((inst),0,0,0,(basereg)); \
+ *(inst)++ = (is_double) ? (unsigned char)0xdd : (unsigned char)0xd9; \
+ x64_membase_emit ((inst), 0, (basereg), (disp)); \
+ x64_codegen_post(inst); \
+} while (0)
+
+#if defined(__native_client_codegen__)
+
+/* The 3-7 byte NOP sequences in x64_padding_size below are all illegal in */
+/* 64-bit Native Client because they load into rSP/rBP or use duplicate */
+/* prefixes. Instead we use the NOPs recommended in Section 3.5.1.8 of the */
+/* Intel64 and IA-32 Architectures Optimization Reference Manual and */
+/* Section 4.13 of AMD Software Optimization Guide for Family 10h Processors. */
+
+#define x64_padding_size(inst,size) \
+ do { \
+ unsigned char *code_start = (inst); \
+ switch ((size)) { \
+ /* xchg %eax,%eax, recognized by hardware as a NOP */ \
+ case 1: *(inst)++ = 0x90; break; \
+ /* xchg %ax,%ax */ \
+ case 2: *(inst)++ = 0x66; *(inst)++ = 0x90; \
+ break; \
+ /* nop (%rax) */ \
+ case 3: *(inst)++ = 0x0f; *(inst)++ = 0x1f; \
+ *(inst)++ = 0x00; \
+ break; \
+ /* nop 0x0(%rax) */ \
+ case 4: *(inst)++ = 0x0f; *(inst)++ = 0x1f; \
+ x86_address_byte ((inst), 1, 0, X64_RAX); \
+ x86_imm_emit8 ((inst), 0); \
+ break; \
+ /* nop 0x0(%rax,%rax) */ \
+ case 5: *(inst)++ = 0x0f; *(inst)++ = 0x1f; \
+ x86_address_byte ((inst), 1, 0, 4); \
+ x86_address_byte ((inst), 0, X64_RAX, X64_RAX); \
+ x86_imm_emit8 ((inst), 0); \
+ break; \
+ /* nopw 0x0(%rax,%rax) */ \
+ case 6: *(inst)++ = 0x66; *(inst)++ = 0x0f; \
+ *(inst)++ = 0x1f; \
+ x86_address_byte ((inst), 1, 0, 4); \
+ x86_address_byte ((inst), 0, X64_RAX, X64_RAX); \
+ x86_imm_emit8 ((inst), 0); \
+ break; \
+ /* nop 0x0(%rax) (32-bit displacement) */ \
+ case 7: *(inst)++ = 0x0f; *(inst)++ = 0x1f; \
+ x86_address_byte ((inst), 2, 0, X64_RAX); \
+ x86_imm_emit32((inst), 0); \
+ break; \
+ /* nop 0x0(%rax,%rax) (32-bit displacement) */ \
+ case 8: *(inst)++ = 0x0f; *(inst)++ = 0x1f; \
+ x86_address_byte ((inst), 2, 0, 4); \
+ x86_address_byte ((inst), 0, X64_RAX, X64_RAX); \
+ x86_imm_emit32 ((inst), 0); \
+ break; \
+ default: \
+ g_assert_not_reached(); \
+ } \
+ g_assert(code_start + (size) == (unsigned char *)(inst)); \
+ } while (0)
+
+/* Size is ignored for Native Client calls; we restrict jumping to 32 bits */
+#define x64_call_membase_size(inst,basereg,disp,size) \
+ do { \
+ x64_codegen_pre((inst)); \
+ x64_call_sequence_pre(inst); \
+ x64_mov_reg_membase((inst), X64_R11, (basereg), (disp), 4); \
+ x64_call_reg_internal((inst), X64_R11); \
+ x64_call_sequence_post(inst); \
+ x64_codegen_post((inst)); \
+ } while (0)
+
+/* Size is ignored for Native Client jumps; we restrict jumping to 32 bits */
+#define x64_jump_membase_size(inst,basereg,disp,size) \
+ do { \
+ x64_mov_reg_membase((inst), X64_R11, (basereg), (disp), 4); \
+ x64_jump_reg_size((inst), X64_R11, 4); \
+ } while (0)
+
+/* On Native Client we can't jump more than INT_MAX in either direction */
+#define x64_jump_code_size(inst,target,size) \
+ do { \
+ /* x86_jump_code used twice in case of */ \
+ /* relocation by x64_codegen_post */ \
+ uint8_t* jump_start; \
+ x64_codegen_pre(inst); \
+ assert(x64_is_imm32 ((int64_t)(target) - (int64_t)(inst))); \
+ x86_jump_code((inst),(target)); \
+ inst = x64_codegen_post(inst); \
+ jump_start = (inst); \
+ x86_jump_code((inst),(target)); \
+ mono_x64_patch(jump_start, (target)); \
+} while (0)
+
+#else
+
+/* From the AMD64 Software Optimization Manual */
+#define x64_padding_size(inst,size) \
+ do { \
+ switch ((size)) { \
+ case 1: *(inst)++ = 0x90; break; \
+ case 2: *(inst)++ = 0x66; *(inst)++ = 0x90; break; \
+ case 3: *(inst)++ = 0x66; *(inst)++ = 0x66; *(inst)++ = 0x90; break; \
+ default: x64_emit_rex ((inst),8,0,0,0); x86_padding ((inst), (size) - 1); \
+ }; \
+ } while (0)
+
+#define x64_call_membase_size(inst,basereg,disp,size) do { x64_emit_rex ((inst),0,0,0,(basereg)); *(inst)++ = (unsigned char)0xff; x64_membase_emit ((inst),2, (basereg),(disp)); } while (0)
+#define x64_jump_membase_size(inst,basereg,disp,size) do { x64_emit_rex ((inst),0,0,0,(basereg)); *(inst)++ = (unsigned char)0xff; x64_membase_emit ((inst), 4, (basereg), (disp)); } while (0)
+
+#define x64_jump_code_size(inst,target,size) do { \
+ if (x64_is_imm32 ((int64_t)(target) - (int64_t)(inst))) { \
+ x86_jump_code((inst),(target)); \
+ } else { \
+ x64_jump_membase ((inst), X64_RIP, 0); \
+ *(uint64_t*)(inst) = (uint64_t)(target); \
+ (inst) += 8; \
+ } \
+} while (0)
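+
+/* The out-of-range case materializes an indirect jump through an
+ * inline literal: jmp *0(%rip) immediately followed by the 8-byte
+ * target address, so any 64-bit destination is reachable without a
+ * scratch register. */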
+
+#endif /*__native_client_codegen__*/
+
+/*
+ * SSE
+ */
+
+//TODO Reorganize SSE opcode defines.
+
+/* Two opcode SSE defines */
+#define emit_sse_reg_reg_op2(inst, dreg, reg, op1, op2) \
+ emit_sse_reg_reg_op2_size((inst), (dreg), (reg), (op1), (op2), 0)
+
+#define emit_sse_reg_reg_op2_size(inst, dreg, reg, op1, op2, size) \
+ do { \
+ x64_codegen_pre(inst); \
+ x64_emit_rex ((inst), size, (dreg), 0, (reg)); \
+ *(inst)++ = (unsigned char)(op1); \
+ *(inst)++ = (unsigned char)(op2); \
+ x86_reg_emit ((inst), (dreg), (reg)); \
+ x64_codegen_post(inst); \
+ } while (0)
+
+#define emit_sse_reg_reg_op2_imm(inst, dreg, reg, op1, op2, imm) \
+ do { \
+ x64_codegen_pre(inst); \
+ emit_sse_reg_reg_op2 ((inst), (dreg), (reg), (op1), (op2)); \
+ x86_imm_emit8 ((inst), (imm)); \
+ x64_codegen_post(inst); \
+ } while (0)
+
+#define emit_sse_membase_reg_op2(inst, basereg, disp, reg, op1, op2) \
+ emit_sse_membase_reg_op2_size((inst), (basereg), (disp), (reg), (op1), (op2), 0)
+
+#define emit_sse_membase_reg_op2_size(inst, basereg, disp, reg, op1, op2, size) \
+ do { \
+ x64_codegen_pre(inst); \
+ x64_emit_rex ((inst), (size), (reg), 0, (basereg)); \
+ *(inst)++ = (unsigned char)(op1); \
+ *(inst)++ = (unsigned char)(op2); \
+ x64_membase_emit ((inst), (reg), (basereg), (disp)); \
+ x64_codegen_post(inst); \
+ } while (0)
+
+#define emit_sse_memindex_reg_op2(inst, basereg, disp, indexreg, shift, reg, op1, op2) \
+ do { \
+ x64_codegen_pre(inst); \
+ x64_emit_rex (inst, 0, (reg), (indexreg), (basereg)); \
+ *(inst)++ = (unsigned char)(op1); \
+ *(inst)++ = (unsigned char)(op2); \
+ x64_memindex_emit((inst), (reg), (basereg), (disp), (indexreg), (shift)); \
+ x64_codegen_post(inst); \
+ } while(0)
+
+#define emit_sse_reg_membase_op2(inst, dreg, basereg, disp, op1, op2) \
+ emit_sse_reg_membase_op2_size((inst), (dreg), (basereg), (disp), (op1), (op2), 0)
+
+#define emit_sse_reg_membase_op2_size(inst, dreg, basereg, disp, op1, op2, size) \
+ do { \
+ x64_codegen_pre(inst); \
+ x64_emit_rex ((inst), (size), (dreg), 0, (basereg) == X64_RIP ? 0 : (basereg)); \
+ *(inst)++ = (unsigned char)(op1); \
+ *(inst)++ = (unsigned char)(op2); \
+ x64_membase_emit ((inst), (dreg), (basereg), (disp)); \
+ x64_codegen_post(inst); \
+ } while (0)
+
+#define emit_sse_reg_memindex_op2(inst, dreg, basereg, disp, indexreg, shift, op1, op2) \
+ do { \
+ x64_codegen_pre(inst); \
+ x64_emit_rex (inst, 0, (dreg), (indexreg), (basereg) == X64_RIP ? 0 : (basereg)); \
+ *(inst)++ = (unsigned char)(op1); \
+ *(inst)++ = (unsigned char)(op2); \
+ x64_memindex_emit((inst), (dreg), (basereg), (disp), (indexreg), (shift)); \
+ x64_codegen_post(inst); \
+ } while(0)
+
+/* Three opcode SSE defines */
+#define emit_opcode3(inst,op1,op2,op3) do { \
+ *(inst)++ = (unsigned char)(op1); \
+ *(inst)++ = (unsigned char)(op2); \
+ *(inst)++ = (unsigned char)(op3); \
+} while (0)
+
+#define emit_sse_reg_reg_size(inst,dreg,reg,op1,op2,op3,size) do { \
+ x64_codegen_pre(inst); \
+ *(inst)++ = (unsigned char)(op1); \
+ x64_emit_rex ((inst), size, (dreg), 0, (reg)); \
+ *(inst)++ = (unsigned char)(op2); \
+ *(inst)++ = (unsigned char)(op3); \
+ x86_reg_emit ((inst), (dreg), (reg)); \
+ x64_codegen_post(inst); \
+} while (0)
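+
+/* Worked encoding (illustrative): x64_sse_movsd_reg_reg (p, X64_XMM0, X64_XMM1)
+ * emits F2 0F 10 C1 (movsd xmm0, xmm1). Note that op1 -- the mandatory
+ * F2/F3/66 prefix -- is emitted before the REX byte, as the encoding rules
+ * require: any REX must sit between the prefix and the 0F escape. */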
+
+#define emit_sse_reg_reg(inst,dreg,reg,op1,op2,op3) emit_sse_reg_reg_size ((inst), (dreg), (reg), (op1), (op2), (op3), 0)
+
+#define emit_sse_reg_reg_imm(inst,dreg,reg,op1,op2,op3,imm) do { \
+ x64_codegen_pre(inst); \
+ emit_sse_reg_reg ((inst), (dreg), (reg), (op1), (op2), (op3)); \
+ x86_imm_emit8 ((inst), (imm)); \
+ x64_codegen_post(inst); \
+} while (0)
+
+#define emit_sse_membase_reg(inst,basereg,disp,reg,op1,op2,op3) do { \
+ x64_codegen_pre(inst); \
+ x86_prefix((inst), (unsigned char)(op1)); \
+ x64_emit_rex ((inst), 0, (reg), 0, (basereg)); \
+ *(inst)++ = (unsigned char)(op2); \
+ *(inst)++ = (unsigned char)(op3); \
+ x64_membase_emit ((inst), (reg), (basereg), (disp)); \
+ x64_codegen_post(inst); \
+} while (0)
+
+#define emit_sse_reg_membase(inst,dreg,basereg,disp,op1,op2,op3) do { \
+ x64_codegen_pre(inst); \
+ x86_prefix((inst), (unsigned char)(op1)); \
+ x64_emit_rex ((inst), 0, (dreg), 0, (basereg) == X64_RIP ? 0 : (basereg)); \
+ *(inst)++ = (unsigned char)(op2); \
+ *(inst)++ = (unsigned char)(op3); \
+ x64_membase_emit ((inst), (dreg), (basereg), (disp)); \
+ x64_codegen_post(inst); \
+} while (0)
+
+/* Four opcode SSE defines */
+
+#define emit_sse_reg_reg_op4_size(inst,dreg,reg,op1,op2,op3,op4,size) do { \
+ x64_codegen_pre(inst); \
+ x86_prefix((inst), (unsigned char)(op1)); \
+ x64_emit_rex ((inst), size, (dreg), 0, (reg)); \
+ *(inst)++ = (unsigned char)(op2); \
+ *(inst)++ = (unsigned char)(op3); \
+ *(inst)++ = (unsigned char)(op4); \
+ x86_reg_emit ((inst), (dreg), (reg)); \
+ x64_codegen_post(inst); \
+} while (0)
+
+#define emit_sse_reg_reg_op4(inst,dreg,reg,op1,op2,op3,op4) emit_sse_reg_reg_op4_size ((inst), (dreg), (reg), (op1), (op2), (op3), (op4), 0)
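+
+/* Worked encoding (illustrative): the SSE4.1 forms defined below, e.g.
+ * x64_sse_pmaxsd_reg_reg (p, X64_XMM2, X64_XMM3), emit 66 0F 38 3D D3
+ * (pmaxsd xmm2, xmm3); the 66 prefix again precedes any REX byte. */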
+
+/* Specific SSE opcode defines */
+
+#define x64_sse_xorpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg ((inst),(dreg),(reg), 0x66, 0x0f, 0x57)
+
+#define x64_sse_xorpd_reg_membase(inst,dreg,basereg,disp) emit_sse_reg_membase ((inst),(dreg),(basereg), (disp), 0x66, 0x0f, 0x57)
+
+#define x64_sse_andpd_reg_membase(inst,dreg,basereg,disp) emit_sse_reg_membase ((inst),(dreg),(basereg), (disp), 0x66, 0x0f, 0x54)
+
+#define x64_sse_movsd_reg_reg(inst,dreg,reg) emit_sse_reg_reg ((inst), (dreg), (reg), 0xf2, 0x0f, 0x10)
+
+#define x64_sse_movsd_reg_membase(inst,dreg,basereg,disp) emit_sse_reg_membase ((inst), (dreg), (basereg), (disp), 0xf2, 0x0f, 0x10)
+
+#define x64_sse_movsd_membase_reg(inst,basereg,disp,reg) emit_sse_membase_reg ((inst), (basereg), (disp), (reg), 0xf2, 0x0f, 0x11)
+
+#define x64_sse_movss_membase_reg(inst,basereg,disp,reg) emit_sse_membase_reg ((inst), (basereg), (disp), (reg), 0xf3, 0x0f, 0x11)
+
+#define x64_sse_movss_reg_membase(inst,dreg,basereg,disp) emit_sse_reg_membase ((inst), (dreg), (basereg), (disp), 0xf3, 0x0f, 0x10)
+
+#define x64_sse_comisd_reg_reg(inst,dreg,reg) emit_sse_reg_reg ((inst),(dreg),(reg),0x66,0x0f,0x2f)
+
+#define x64_sse_comisd_reg_membase(inst,dreg,basereg,disp) emit_sse_reg_membase ((inst), (dreg), (basereg), (disp), 0x66, 0x0f, 0x2f)
+
+#define x64_sse_ucomisd_reg_reg(inst,dreg,reg) emit_sse_reg_reg ((inst),(dreg),(reg),0x66,0x0f,0x2e)
+
+#define x64_sse_cvtsd2si_reg_reg(inst,dreg,reg) emit_sse_reg_reg_size ((inst), (dreg), (reg), 0xf2, 0x0f, 0x2d, 8)
+
+#define x64_sse_cvttsd2si_reg_reg_size(inst,dreg,reg,size) emit_sse_reg_reg_size ((inst), (dreg), (reg), 0xf2, 0x0f, 0x2c, (size))
+
+#define x64_sse_cvttsd2si_reg_reg(inst,dreg,reg) x64_sse_cvttsd2si_reg_reg_size ((inst), (dreg), (reg), 8)
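+
+/* Worked encoding (illustrative): with size 8 the REX.W bit widens the
+ * integer destination, so x64_sse_cvttsd2si_reg_reg (p, X64_RAX, X64_XMM0)
+ * emits F2 48 0F 2C C0 (cvttsd2si rax, xmm0); with size 4 the 48 byte is
+ * dropped and the destination becomes eax. */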
+
+#define x64_sse_cvtsi2sd_reg_reg_size(inst,dreg,reg,size) emit_sse_reg_reg_size ((inst), (dreg), (reg), 0xf2, 0x0f, 0x2a, (size))
+
+#define x64_sse_cvtsi2sd_reg_reg(inst,dreg,reg) x64_sse_cvtsi2sd_reg_reg_size ((inst), (dreg), (reg), 8)
+
+#define x64_sse_cvtsi2ss_reg_reg_size(inst,dreg,reg,size) emit_sse_reg_reg_size ((inst), (dreg), (reg), 0xf3, 0x0f, 0x2a, (size))
+
+#define x64_sse_cvtsi2ss_reg_reg(inst,dreg,reg) x64_sse_cvtsi2ss_reg_reg_size ((inst), (dreg), (reg), 8)
+
+#define x64_sse_cvtsd2ss_reg_reg(inst,dreg,reg) emit_sse_reg_reg ((inst), (dreg), (reg), 0xf2, 0x0f, 0x5a)
+
+#define x64_sse_cvtss2sd_reg_reg(inst,dreg,reg) emit_sse_reg_reg ((inst), (dreg), (reg), 0xf3, 0x0f, 0x5a)
+
+#define x64_sse_addsd_reg_reg(inst,dreg,reg) emit_sse_reg_reg ((inst), (dreg), (reg), 0xf2, 0x0f, 0x58)
+
+#define x64_sse_subsd_reg_reg(inst,dreg,reg) emit_sse_reg_reg ((inst), (dreg), (reg), 0xf2, 0x0f, 0x5c)
+
+#define x64_sse_mulsd_reg_reg(inst,dreg,reg) emit_sse_reg_reg ((inst), (dreg), (reg), 0xf2, 0x0f, 0x59)
+
+#define x64_sse_divsd_reg_reg(inst,dreg,reg) emit_sse_reg_reg ((inst), (dreg), (reg), 0xf2, 0x0f, 0x5e)
+
+#define x64_sse_sqrtsd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0xf2, 0x0f, 0x51)
+
+
+#define x64_sse_pinsrw_reg_reg_imm(inst,dreg,reg,imm) emit_sse_reg_reg_imm ((inst), (dreg), (reg), 0x66, 0x0f, 0xc4, (imm))
+
+#define x64_sse_pextrw_reg_reg_imm(inst,dreg,reg,imm) emit_sse_reg_reg_imm ((inst), (dreg), (reg), 0x66, 0x0f, 0xc5, (imm))
+
+
+#define x64_sse_cvttsd2si_reg_xreg_size(inst,reg,xreg,size) emit_sse_reg_reg_size ((inst), (reg), (xreg), 0xf2, 0x0f, 0x2c, (size))
+
+
+#define x64_sse_addps_reg_reg(inst, dreg, reg) \
+ emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x58)
+
+#define x64_sse_addps_reg_reg_size(inst, dreg, reg, size) \
+ emit_sse_reg_reg_op2_size((inst), (dreg), (reg), 0x0f, 0x58, size)
+
+#define x64_sse_divps_reg_reg(inst,dreg,reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x5e)
+
+#define x64_sse_mulps_reg_reg(inst, dreg, reg) \
+ emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x59)
+
+#define x64_sse_mulps_reg_reg_size(inst, dreg, reg, size) \
+ emit_sse_reg_reg_op2_size((inst), (dreg), (reg), 0x0f, 0x59, size)
+
+#define x64_sse_subps_reg_reg(inst, dreg, reg) \
+ emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x5c)
+
+#define x64_sse_subps_reg_reg_size(inst, dreg, reg, size) \
+ emit_sse_reg_reg_op2_size((inst), (dreg), (reg), 0x0f, 0x5c, size)
+
+#define x64_sse_maxps_reg_reg(inst,dreg,reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x5f)
+
+#define x64_sse_minps_reg_reg(inst,dreg,reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x5d)
+
+#define x64_sse_cmpps_reg_reg_imm(inst,dreg,reg,imm) emit_sse_reg_reg_op2_imm((inst), (dreg), (reg), 0x0f, 0xc2, (imm))
+
+#define x64_sse_andps_reg_reg(inst,dreg,reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x54)
+
+#define x64_sse_andnps_reg_reg(inst,dreg,reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x55)
+
+#define x64_sse_orps_reg_reg(inst,dreg,reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x56)
+
+#define x64_sse_xorps_reg_reg(inst,dreg,reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x57)
+
+#define x64_sse_sqrtps_reg_reg(inst,dreg,reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x51)
+
+#define x64_sse_rsqrtps_reg_reg(inst,dreg,reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x52)
+
+#define x64_sse_rcpps_reg_reg(inst,dreg,reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x53)
+
+#define x64_sse_addsubps_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0xf2, 0x0f, 0xd0)
+
+#define x64_sse_haddps_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0xf2, 0x0f, 0x7c)
+
+#define x64_sse_hsubps_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0xf2, 0x0f, 0x7d)
+
+#define x64_sse_movshdup_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0xf3, 0x0f, 0x16)
+
+#define x64_sse_movsldup_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0xf3, 0x0f, 0x12)
+
+
+#define x64_sse_pshufhw_reg_reg_imm(inst,dreg,reg,imm) emit_sse_reg_reg_imm((inst), (dreg), (reg), 0xf3, 0x0f, 0x70, (imm))
+
+#define x64_sse_pshuflw_reg_reg_imm(inst,dreg,reg,imm) emit_sse_reg_reg_imm((inst), (dreg), (reg), 0xf2, 0x0f, 0x70, (imm))
+
+#define x64_sse_pshufd_reg_reg_imm(inst,dreg,reg,imm) emit_sse_reg_reg_imm((inst), (dreg), (reg), 0x66, 0x0f, 0x70, (imm))
+
+#define x64_sse_shufps_reg_reg_imm(inst,dreg,reg,imm) emit_sse_reg_reg_op2_imm((inst), (dreg), (reg), 0x0f, 0xc6, (imm))
+
+#define x64_sse_shufpd_reg_reg_imm(inst,dreg,reg,imm) emit_sse_reg_reg_imm((inst), (dreg), (reg), 0x66, 0x0f, 0xc6, (imm))
+
+
+#define x64_sse_addpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x58)
+
+#define x64_sse_divpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x5e)
+
+#define x64_sse_mulpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x59)
+
+#define x64_sse_subpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x5c)
+
+#define x64_sse_maxpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x5f)
+
+#define x64_sse_minpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x5d)
+
+#define x64_sse_cmppd_reg_reg_imm(inst,dreg,reg,imm) emit_sse_reg_reg_imm((inst), (dreg), (reg), 0x66, 0x0f, 0xc2, (imm))
+
+#define x64_sse_andpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x54)
+
+#define x64_sse_andnpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x55)
+
+#define x64_sse_orpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x56)
+
+#define x64_sse_sqrtpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x51)
+
+#define x64_sse_rsqrtpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x52)
+
+#define x64_sse_rcppd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x53)
+
+#define x64_sse_addsubpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xd0)
+
+#define x64_sse_haddpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x7c)
+
+#define x64_sse_hsubpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x7d)
+
+#define x64_sse_movddup_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0xf2, 0x0f, 0x12)
+
+
+#define x64_sse_pmovmskb_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xd7)
+
+
+#define x64_sse_pand_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xdb)
+
+#define x64_sse_por_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xeb)
+
+#define x64_sse_pxor_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xef)
+
+
+#define x64_sse_paddb_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xfc)
+
+#define x64_sse_paddw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xfd)
+
+#define x64_sse_paddd_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xfe)
+
+#define x64_sse_paddq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xd4)
+
+
+#define x64_sse_psubb_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xf8)
+
+#define x64_sse_psubw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xf9)
+
+#define x64_sse_psubd_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xfa)
+
+#define x64_sse_psubq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xfb)
+
+
+#define x64_sse_pmaxub_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xde)
+
+#define x64_sse_pmaxuw_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op4((inst), (dreg), (reg), 0x66, 0x0f, 0x38, 0x3e)
+
+#define x64_sse_pmaxud_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op4((inst), (dreg), (reg), 0x66, 0x0f, 0x38, 0x3f)
+
+
+#define x64_sse_pmaxsb_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op4((inst), (dreg), (reg), 0x66, 0x0f, 0x38, 0x3c)
+
+#define x64_sse_pmaxsw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xee)
+
+#define x64_sse_pmaxsd_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op4((inst), (dreg), (reg), 0x66, 0x0f, 0x38, 0x3d)
+
+
+#define x64_sse_pavgb_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xe0)
+
+#define x64_sse_pavgw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xe3)
+
+
+#define x64_sse_pminub_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xda)
+
+#define x64_sse_pminuw_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op4((inst), (dreg), (reg), 0x66, 0x0f, 0x38, 0x3a)
+
+#define x64_sse_pminud_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op4((inst), (dreg), (reg), 0x66, 0x0f, 0x38, 0x3b)
+
+
+#define x64_sse_pminsb_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op4((inst), (dreg), (reg), 0x66, 0x0f, 0x38, 0x38)
+
+#define x64_sse_pminsw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xea)
+
+#define x64_sse_pminsd_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op4((inst), (dreg), (reg), 0x66, 0x0f, 0x38, 0x39)
+
+
+#define x64_sse_pcmpeqb_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x74)
+
+#define x64_sse_pcmpeqw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x75)
+
+#define x64_sse_pcmpeqd_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x76)
+
+#define x64_sse_pcmpeqq_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op4((inst), (dreg), (reg), 0x66, 0x0f, 0x38, 0x29)
+
+
+#define x64_sse_pcmpgtb_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x64)
+
+#define x64_sse_pcmpgtw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x65)
+
+#define x64_sse_pcmpgtd_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x66)
+
+#define x64_sse_pcmpgtq_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op4((inst), (dreg), (reg), 0x66, 0x0f, 0x38, 0x37)
+
+
+#define x64_sse_psadbw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xf6)
+
+
+#define x64_sse_punpcklbw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x60)
+
+#define x64_sse_punpcklwd_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x61)
+
+#define x64_sse_punpckldq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x62)
+
+#define x64_sse_punpcklqdq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x6c)
+
+#define x64_sse_unpcklpd_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x14)
+
+#define x64_sse_unpcklps_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x14)
+
+
+#define x64_sse_punpckhbw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x68)
+
+#define x64_sse_punpckhwd_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x69)
+
+#define x64_sse_punpckhdq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x6a)
+
+#define x64_sse_punpckhqdq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x6d)
+
+#define x64_sse_unpckhpd_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x15)
+
+#define x64_sse_unpckhps_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x15)
+
+
+#define x64_sse_packsswb_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x63)
+
+#define x64_sse_packssdw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x6b)
+
+#define x64_sse_packuswb_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x67)
+
+#define x64_sse_packusdw_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op4((inst), (dreg), (reg), 0x66, 0x0f, 0x38, 0x2b)
+
+
+#define x64_sse_paddusb_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xdc)
+
+#define x64_sse_psubusb_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xd8)
+
+#define x64_sse_paddusw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xdd)
+
+#define x64_sse_psubusw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xd9)
+
+
+#define x64_sse_paddsb_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xec)
+
+#define x64_sse_psubsb_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xe8)
+
+#define x64_sse_paddsw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xed)
+
+#define x64_sse_psubsw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xe9)
+
+
+#define x64_sse_pmullw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xd5)
+
+#define x64_sse_pmulld_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op4((inst), (dreg), (reg), 0x66, 0x0f, 0x38, 0x40)
+
+#define x64_sse_pmuludq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xf4)
+
+#define x64_sse_pmulhuw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xe4)
+
+#define x64_sse_pmulhw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xe5)
+
+
+#define x64_sse_psrlw_reg_imm(inst, reg, imm) emit_sse_reg_reg_imm((inst), X86_SSE_SHR, (reg), 0x66, 0x0f, 0x71, (imm))
+
+#define x64_sse_psrlw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xd1)
+
+
+#define x64_sse_psraw_reg_imm(inst, reg, imm) emit_sse_reg_reg_imm((inst), X86_SSE_SAR, (reg), 0x66, 0x0f, 0x71, (imm))
+
+#define x64_sse_psraw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xe1)
+
+
+#define x64_sse_psllw_reg_imm(inst, reg, imm) emit_sse_reg_reg_imm((inst), X86_SSE_SHL, (reg), 0x66, 0x0f, 0x71, (imm))
+
+#define x64_sse_psllw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xf1)
+
+
+#define x64_sse_psrld_reg_imm(inst, reg, imm) emit_sse_reg_reg_imm((inst), X86_SSE_SHR, (reg), 0x66, 0x0f, 0x72, (imm))
+
+#define x64_sse_psrld_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xd2)
+
+
+#define x64_sse_psrad_reg_imm(inst, reg, imm) emit_sse_reg_reg_imm((inst), X86_SSE_SAR, (reg), 0x66, 0x0f, 0x72, (imm))
+
+#define x64_sse_psrad_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xe2)
+
+
+#define x64_sse_pslld_reg_imm(inst, reg, imm) emit_sse_reg_reg_imm((inst), X86_SSE_SHL, (reg), 0x66, 0x0f, 0x72, (imm))
+
+#define x64_sse_pslld_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xf2)
+
+
+#define x64_sse_psrlq_reg_imm(inst, reg, imm) emit_sse_reg_reg_imm((inst), X86_SSE_SHR, (reg), 0x66, 0x0f, 0x73, (imm))
+
+#define x64_sse_psrlq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xd3)
+
+
+/* Caution: x86 has no packed 64-bit arithmetic right shift before AVX-512
+ * (VPSRAQ), so the two defines below reuse encodings that belong to other
+ * instructions (66 0F 73 /4 is undefined; 66 0F E3 is pavgw). They should
+ * not be relied on. */
+#define x64_sse_psraq_reg_imm(inst, reg, imm) emit_sse_reg_reg_imm((inst), X86_SSE_SAR, (reg), 0x66, 0x0f, 0x73, (imm))
+
+#define x64_sse_psraq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xe3)
+
+
+#define x64_sse_psllq_reg_imm(inst, reg, imm) emit_sse_reg_reg_imm((inst), X86_SSE_SHL, (reg), 0x66, 0x0f, 0x73, (imm))
+
+#define x64_sse_psllq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xf3)
+
+
+#define x64_sse_cvtdq2pd_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0xf3, 0x0f, 0xe6)
+
+#define x64_sse_cvtdq2ps_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x5b)
+
+#define x64_sse_cvtpd2dq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0xf2, 0x0f, 0xe6)
+
+#define x64_sse_cvtpd2ps_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x5a)
+
+#define x64_sse_cvtps2dq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x5b)
+
+#define x64_sse_cvtps2pd_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x5a)
+
+#define x64_sse_cvttpd2dq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xe6)
+
+#define x64_sse_cvttps2dq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0xf3, 0x0f, 0x5b)
+
+
+#define x64_movd_xreg_reg_size(inst,dreg,sreg,size) emit_sse_reg_reg_size((inst), (dreg), (sreg), 0x66, 0x0f, 0x6e, (size))
+
+#define x64_movd_reg_xreg_size(inst,dreg,sreg,size) emit_sse_reg_reg_size((inst), (sreg), (dreg), 0x66, 0x0f, 0x7e, (size))
+
+#define x64_movd_xreg_membase(inst,dreg,basereg,disp) emit_sse_reg_membase((inst), (dreg), (basereg), (disp), 0x66, 0x0f, 0x6e)
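+
+/* Worked encoding (illustrative): x64_movd_xreg_reg_size (p, X64_XMM0, X64_RAX, 8)
+ * emits 66 48 0F 6E C0, which disassembles as movq xmm0, rax -- with REX.W
+ * set, the 0F 6E form moves a full 64-bit GPR despite the movd name. */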
+
+#define x64_sse_movhlps_reg_reg(inst, dreg, sreg) \
+ emit_sse_reg_reg_op2((inst), (dreg), (sreg), 0x0f, 0x12)
+
+#define x64_sse_movhlps_reg_reg_size(inst, dreg, sreg, size) \
+ emit_sse_reg_reg_op2_size((inst), (dreg), (sreg), 0x0f, 0x12, size)
+
+#define x64_sse_movlhps_reg_reg(inst, dreg, sreg) \
+ emit_sse_reg_reg_op2((inst), (dreg), (sreg), 0x0f, 0x16)
+
+#define x64_sse_movlhps_reg_reg_size(inst, dreg, sreg, size) \
+ emit_sse_reg_reg_op2_size((inst), (dreg), (sreg), 0x0f, 0x16, size)
+
+#define x64_sse_movups_membase_reg(inst, basereg, disp, reg) \
+ emit_sse_membase_reg_op2((inst), (basereg), (disp), (reg), 0x0f, 0x11)
+
+#define x64_sse_movups_membase_reg_size(inst, basereg, disp, reg, size) \
+ emit_sse_membase_reg_op2_size((inst), (basereg), (disp), (reg), 0x0f, 0x11, (size))
+
+#define x64_sse_movups_reg_membase(inst, dreg, basereg, disp) \
+ emit_sse_reg_membase_op2((inst), (dreg), (basereg), (disp), 0x0f, 0x10)
+
+#define x64_sse_movups_reg_membase_size(inst, dreg, basereg, disp, size) \
+ emit_sse_reg_membase_op2_size((inst), (dreg), (basereg), (disp), 0x0f, 0x10, (size))
+
+#define x64_sse_movaps_membase_reg(inst, basereg, disp, reg) \
+ emit_sse_membase_reg_op2((inst), (basereg), (disp), (reg), 0x0f, 0x29)
+
+#define x64_sse_movaps_membase_reg_size(inst, basereg, disp, reg, size) \
+ emit_sse_membase_reg_op2_size((inst), (basereg), (disp), (reg), 0x0f, 0x29, (size))
+
+#define x64_sse_movaps_memindex_reg(inst, basereg, disp, indexreg, shift, reg) \
+	emit_sse_memindex_reg_op2((inst), (basereg), (disp), (indexreg), (shift), (reg), 0x0f, 0x29)
+
+#define x64_sse_movaps_reg_membase(inst, dreg, basereg, disp) \
+ emit_sse_reg_membase_op2((inst), (dreg), (basereg), (disp), 0x0f, 0x28)
+
+#define x64_sse_movaps_reg_membase_size(inst, dreg, basereg, disp, size) \
+ emit_sse_reg_membase_op2_size((inst), (dreg), (basereg), (disp), 0x0f, 0x28, (size))
+
+#define x64_sse_movaps_reg_memindex(inst, dreg, basereg, disp, indexreg, shift) \
+	emit_sse_reg_memindex_op2((inst), (dreg), (basereg), (disp), (indexreg), (shift), 0x0f, 0x28)
+
+#define x64_sse_movaps_reg_reg(inst, dreg, reg) \
+ emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x28)
+
+#define x64_sse_movaps_reg_reg_size(inst, dreg, reg, size) \
+ emit_sse_reg_reg_op2_size((inst), (dreg), (reg), 0x0f, 0x28, size)
+
+#define x64_sse_movntps_membase_reg(inst, basereg, disp, reg) \
+ emit_sse_membase_reg_op2((inst), (basereg), (disp), (reg), 0x0f, 0x2b)
+
+#define x64_sse_movntps_memindex_reg(inst, basereg, disp, indexreg, shift, reg) \
+ emit_sse_memindex_reg_op2((inst), (basereg), (disp), (indexreg), (shift), (reg), 0x0f, 0x2b)
+
+#define x64_sse_prefetch_reg_membase(inst, arg, basereg, disp) \
+ emit_sse_reg_membase_op2((inst), (arg), (basereg), (disp), 0x0f, 0x18)
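+
+/* The `arg' above selects the prefetch locality hint through the ModRM reg
+ * field: 0 = prefetchnta, 1 = prefetcht0, 2 = prefetcht1, 3 = prefetcht2
+ * (0F 18 /0 .. /3). */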
+
+#define x64_sse_movdqa_membase_reg(inst, basereg, disp, reg) \
+ emit_sse_membase_reg((inst), (basereg), (disp), (reg), 0x66, 0x0f, 0x7f)
+
+#define x64_sse_movdqa_reg_membase(inst, dreg, basereg, disp) \
+ emit_sse_reg_membase((inst), (dreg), (basereg), (disp), 0x66, 0x0f, 0x6f)
+
+#define x64_sse_movdqa_reg_reg(inst, dreg, reg) \
+ emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x6f)
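+
+/* Usage sketch (illustrative, not from the original source): a 16-byte
+ * aligned copy between two displacements off the same base register using
+ * the movdqa forms above. `code' is a hypothetical uint8_t* emit cursor and
+ * src_off/dst_off are assumed 16-byte aligned.
+ *
+ *   x64_sse_movdqa_reg_membase (code, X64_XMM0, X64_RDI, src_off);
+ *   x64_sse_movdqa_membase_reg (code, X64_RDI, dst_off, X64_XMM0);
+ */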
+
+/* Generated from x86-codegen.h */
+
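+/* Convention for the `_size' wrappers below: size selects the operand width
+ * that x64_emit_rex encodes, with size == 8 setting REX.W. For example,
+ * x64_mov_reg_reg_size (p, X64_RAX, X64_RBX, 8) emits 48 8B C3
+ * (mov rax, rbx), while size 4 emits plain 8B C3 (mov eax, ebx). */
+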
+#define x64_breakpoint_size(inst,size) do { x86_breakpoint(inst); } while (0)
+#define x64_cld_size(inst,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),0,0,0,0); x86_cld(inst); x64_codegen_post(inst); } while (0)
+#define x64_stosb_size(inst,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,0); x86_stosb(inst); x64_codegen_post(inst); } while (0)
+#define x64_stosl_size(inst,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,0); x86_stosl(inst); x64_codegen_post(inst); } while (0)
+#define x64_stosd_size(inst,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,0); x86_stosd(inst); x64_codegen_post(inst); } while (0)
+#define x64_movsb_size(inst,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,0); x86_movsb(inst); x64_codegen_post(inst); } while (0)
+#define x64_movsl_size(inst,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,0); x86_movsl(inst); x64_codegen_post(inst); } while (0)
+#define x64_movsd_size(inst,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,0); x86_movsd(inst); x64_codegen_post(inst); } while (0)
+#define x64_prefix_size(inst,p,size) do { x86_prefix((inst), p); } while (0)
+#define x64_rdtsc_size(inst,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,0); x86_rdtsc(inst); x64_codegen_post(inst); } while (0)
+#define x64_cmpxchg_reg_reg_size(inst,dreg,reg,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_cmpxchg_reg_reg((inst),((dreg)&0x7),((reg)&0x7)); x64_codegen_post(inst); } while (0)
+#define x64_cmpxchg_mem_reg_size(inst,mem,reg,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,(reg)); x86_cmpxchg_mem_reg((inst),(mem),((reg)&0x7)); x64_codegen_post(inst); } while (0)
+#define x64_cmpxchg_membase_reg_size(inst,basereg,disp,reg,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),(reg),0,(basereg)); x86_cmpxchg_membase_reg((inst),((basereg)&0x7),(disp),((reg)&0x7)); x64_codegen_post(inst); } while (0)
+#define x64_xchg_reg_reg_size(inst,dreg,reg,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_xchg_reg_reg((inst),((dreg)&0x7),((reg)&0x7),(size) == 8 ? 4 : (size)); x64_codegen_post(inst); } while (0)
+#define x64_xchg_mem_reg_size(inst,mem,reg,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,(reg)); x86_xchg_mem_reg((inst),(mem),((reg)&0x7),(size) == 8 ? 4 : (size)); x64_codegen_post(inst); } while (0)
+#define x64_xchg_membase_reg_size(inst,basereg,disp,reg,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),(reg),0,(basereg)); x86_xchg_membase_reg((inst),((basereg)&0x7),(disp),((reg)&0x7),(size) == 8 ? 4 : (size)); x64_codegen_post(inst); } while (0)
+#define x64_inc_mem_size(inst,mem,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,0); x86_inc_mem((inst),(mem)); x64_codegen_post(inst); } while (0)
+#define x64_inc_membase_size(inst,basereg,disp,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,(basereg)); x86_inc_membase((inst),((basereg)&0x7),(disp)); x64_codegen_post(inst); } while (0)
+//#define x64_inc_reg_size(inst,reg,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,(reg)); x86_inc_reg((inst),((reg)&0x7)); x64_codegen_post(inst); } while (0)
+#define x64_dec_mem_size(inst,mem,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,0); x86_dec_mem((inst),(mem)); x64_codegen_post(inst); } while (0)
+#define x64_dec_membase_size(inst,basereg,disp,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,(basereg)); x86_dec_membase((inst),((basereg)&0x7),(disp)); x64_codegen_post(inst); } while (0)
+//#define x64_dec_reg_size(inst,reg,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,(reg)); x86_dec_reg((inst),((reg)&0x7)); x64_codegen_post(inst); } while (0)
+#define x64_not_mem_size(inst,mem,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,0); x86_not_mem((inst),(mem)); x64_codegen_post(inst); } while (0)
+#define x64_not_membase_size(inst,basereg,disp,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,(basereg)); x86_not_membase((inst),((basereg)&0x7),(disp)); x64_codegen_post(inst); } while (0)
+#define x64_not_reg_size(inst,reg,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,(reg)); x86_not_reg((inst),((reg)&0x7)); x64_codegen_post(inst); } while (0)
+#define x64_neg_mem_size(inst,mem,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,0); x86_neg_mem((inst),(mem)); x64_codegen_post(inst); } while (0)
+#define x64_neg_membase_size(inst,basereg,disp,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,(basereg)); x86_neg_membase((inst),((basereg)&0x7),(disp)); x64_codegen_post(inst); } while (0)
+#define x64_neg_reg_size(inst,reg,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,(reg)); x86_neg_reg((inst),((reg)&0x7)); x64_codegen_post(inst); } while (0)
+#define x64_nop_size(inst,size) do { x64_codegen_pre(inst); x86_nop(inst); x64_codegen_post(inst); } while (0)
+//#define x64_alu_reg_imm_size(inst,opc,reg,imm,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,(reg)); x86_alu_reg_imm((inst),(opc),((reg)&0x7),(imm)); x64_codegen_post(inst); } while (0)
+#define x64_alu_mem_imm_size(inst,opc,mem,imm,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,0); x86_alu_mem_imm((inst),(opc),(mem),(imm)); x64_codegen_post(inst); } while (0)
+#define x64_alu_membase_imm_size(inst,opc,basereg,disp,imm,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,(basereg)); x86_alu_membase_imm((inst),(opc),((basereg)&0x7),(disp),(imm)); x64_codegen_post(inst); } while (0)
+#define x64_alu_membase8_imm_size(inst,opc,basereg,disp,imm,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,(basereg)); x86_alu_membase8_imm((inst),(opc),((basereg)&0x7),(disp),(imm)); x64_codegen_post(inst); } while (0)
+#define x64_alu_mem_reg_size(inst,opc,mem,reg,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,(reg)); x86_alu_mem_reg((inst),(opc),(mem),((reg)&0x7)); x64_codegen_post(inst); } while (0)
+#define x64_alu_membase_reg_size(inst,opc,basereg,disp,reg,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),(reg),0,(basereg)); x86_alu_membase_reg((inst),(opc),((basereg)&0x7),(disp),((reg)&0x7)); x64_codegen_post(inst); } while (0)
+//#define x64_alu_reg_reg_size(inst,opc,dreg,reg,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_alu_reg_reg((inst),(opc),((dreg)&0x7),((reg)&0x7)); x64_codegen_post(inst); } while (0)
+#define x64_alu_reg8_reg8_size(inst,opc,dreg,reg,is_dreg_h,is_reg_h,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_alu_reg8_reg8((inst),(opc),((dreg)&0x7),((reg)&0x7),(is_dreg_h),(is_reg_h)); x64_codegen_post(inst); } while (0)
+#define x64_alu_reg_mem_size(inst,opc,reg,mem,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,(reg)); x86_alu_reg_mem((inst),(opc),((reg)&0x7),(mem)); x64_codegen_post(inst); } while (0)
+//#define x64_alu_reg_membase_size(inst,opc,reg,basereg,disp,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),(reg),0,(basereg)); x86_alu_reg_membase((inst),(opc),((reg)&0x7),((basereg)&0x7),(disp)); x64_codegen_post(inst); } while (0)
+#define x64_test_reg_imm_size(inst,reg,imm,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,(reg)); x86_test_reg_imm((inst),((reg)&0x7),(imm)); x64_codegen_post(inst); } while (0)
+#define x64_test_mem_imm_size(inst,mem,imm,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,0); x86_test_mem_imm((inst),(mem),(imm)); x64_codegen_post(inst); } while (0)
+#define x64_test_membase_imm_size(inst,basereg,disp,imm,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,(basereg)); x86_test_membase_imm((inst),((basereg)&0x7),(disp),(imm)); x64_codegen_post(inst); } while (0)
+#define x64_test_reg_reg_size(inst,dreg,reg,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_test_reg_reg((inst),((dreg)&0x7),((reg)&0x7)); x64_codegen_post(inst); } while (0)
+#define x64_test_mem_reg_size(inst,mem,reg,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,(reg)); x86_test_mem_reg((inst),(mem),((reg)&0x7)); x64_codegen_post(inst); } while (0)
+#define x64_test_membase_reg_size(inst,basereg,disp,reg,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),(reg),0,(basereg)); x86_test_membase_reg((inst),((basereg)&0x7),(disp),((reg)&0x7)); x64_codegen_post(inst); } while (0)
+#define x64_shift_reg_imm_size(inst,opc,reg,imm,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,(reg)); x86_shift_reg_imm((inst),(opc),((reg)&0x7),(imm)); x64_codegen_post(inst); } while (0)
+#define x64_shift_mem_imm_size(inst,opc,mem,imm,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,0); x86_shift_mem_imm((inst),(opc),(mem),(imm)); x64_codegen_post(inst); } while (0)
+#define x64_shift_membase_imm_size(inst,opc,basereg,disp,imm,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,(basereg)); x86_shift_membase_imm((inst),(opc),((basereg)&0x7),(disp),(imm)); x64_codegen_post(inst); } while (0)
+#define x64_shift_reg_size(inst,opc,reg,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,(reg)); x86_shift_reg((inst),(opc),((reg)&0x7)); x64_codegen_post(inst); } while (0)
+#define x64_shift_mem_size(inst,opc,mem,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,0); x86_shift_mem((inst),(opc),(mem)); x64_codegen_post(inst); } while (0)
+#define x64_shift_membase_size(inst,opc,basereg,disp,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,(basereg)); x86_shift_membase((inst),(opc),((basereg)&0x7),(disp)); x64_codegen_post(inst); } while (0)
+#define x64_shrd_reg_size(inst,dreg,reg,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_shrd_reg((inst),((dreg)&0x7),((reg)&0x7)); x64_codegen_post(inst); } while (0)
+#define x64_shrd_reg_imm_size(inst,dreg,reg,shamt,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_shrd_reg_imm((inst),((dreg)&0x7),((reg)&0x7),(shamt)); x64_codegen_post(inst); } while (0)
+#define x64_shld_reg_size(inst,dreg,reg,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_shld_reg((inst),((dreg)&0x7),((reg)&0x7)); x64_codegen_post(inst); } while (0)
+#define x64_shld_reg_imm_size(inst,dreg,reg,shamt,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_shld_reg_imm((inst),((dreg)&0x7),((reg)&0x7),(shamt)); x64_codegen_post(inst); } while (0)
+#define x64_mul_reg_size(inst,reg,is_signed,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,(reg)); x86_mul_reg((inst),((reg)&0x7),(is_signed)); x64_codegen_post(inst); } while (0)
+#define x64_mul_mem_size(inst,mem,is_signed,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,0); x86_mul_mem((inst),(mem),(is_signed)); x64_codegen_post(inst); } while (0)
+#define x64_mul_membase_size(inst,basereg,disp,is_signed,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,(basereg)); x86_mul_membase((inst),((basereg)&0x7),(disp),(is_signed)); x64_codegen_post(inst); } while (0)
+#define x64_imul_reg_reg_size(inst,dreg,reg,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_imul_reg_reg((inst),((dreg)&0x7),((reg)&0x7)); x64_codegen_post(inst); } while (0)
+#define x64_imul_reg_mem_size(inst,reg,mem,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,(reg)); x86_imul_reg_mem((inst),((reg)&0x7),(mem)); x64_codegen_post(inst); } while (0)
+#define x64_imul_reg_membase_size(inst,reg,basereg,disp,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),(reg),0,(basereg)); x86_imul_reg_membase((inst),((reg)&0x7),((basereg)&0x7),(disp)); x64_codegen_post(inst); } while (0)
+#define x64_imul_reg_reg_imm_size(inst,dreg,reg,imm,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_imul_reg_reg_imm((inst),((dreg)&0x7),((reg)&0x7),(imm)); x64_codegen_post(inst); } while (0)
+#define x64_imul_reg_mem_imm_size(inst,reg,mem,imm,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,(reg)); x86_imul_reg_mem_imm((inst),((reg)&0x7),(mem),(imm)); x64_codegen_post(inst); } while (0)
+#define x64_imul_reg_membase_imm_size(inst,reg,basereg,disp,imm,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),(reg),0,(basereg)); x86_imul_reg_membase_imm((inst),((reg)&0x7),((basereg)&0x7),(disp),(imm)); x64_codegen_post(inst); } while (0)
+#define x64_div_reg_size(inst,reg,is_signed,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,(reg)); x86_div_reg((inst),((reg)&0x7),(is_signed)); x64_codegen_post(inst); } while (0)
+#define x64_div_mem_size(inst,mem,is_signed,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,0); x86_div_mem((inst),(mem),(is_signed)); x64_codegen_post(inst); } while (0)
+#define x64_div_membase_size(inst,basereg,disp,is_signed,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,(basereg)); x86_div_membase((inst),((basereg)&0x7),(disp),(is_signed)); x64_codegen_post(inst); } while (0)
+#define x64_mov_mem_reg_size(inst,mem,reg,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,(reg)); x86_mov_mem_reg((inst),(mem),((reg)&0x7),(size) == 8 ? 4 : (size)); x64_codegen_post(inst); } while (0)
+//#define x64_mov_regp_reg_size(inst,regp,reg,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),(regp),0,(reg)); x86_mov_regp_reg((inst),(regp),((reg)&0x7),(size) == 8 ? 4 : (size)); x64_codegen_post(inst); } while (0)
+//#define x64_mov_membase_reg_size(inst,basereg,disp,reg,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),(reg),0,(basereg)); x86_mov_membase_reg((inst),((basereg)&0x7),(disp),((reg)&0x7),(size) == 8 ? 4 : (size)); x64_codegen_post(inst); } while (0)
+#define x64_mov_memindex_reg_size(inst,basereg,disp,indexreg,shift,reg,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),(reg),(indexreg),(basereg)); x86_mov_memindex_reg((inst),((basereg)&0x7),(disp),((indexreg)&0x7),(shift),((reg)&0x7),(size) == 8 ? 4 : (size)); x64_codegen_post(inst); } while (0)
+#define x64_mov_reg_reg_size(inst,dreg,reg,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_mov_reg_reg((inst),((dreg)&0x7),((reg)&0x7),(size) == 8 ? 4 : (size)); x64_codegen_post(inst); } while (0)
+//#define x64_mov_reg_mem_size(inst,reg,mem,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,(reg)); x86_mov_reg_mem((inst),((reg)&0x7),(mem),(size) == 8 ? 4 : (size)); x64_codegen_post(inst); } while (0)
+//#define x64_mov_reg_membase_size(inst,reg,basereg,disp,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),(reg),0,(basereg)); x86_mov_reg_membase((inst),((reg)&0x7),((basereg)&0x7),(disp),(size) == 8 ? 4 : (size)); x64_codegen_post(inst); } while (0)
+//#define x64_mov_reg_memindex_size(inst,reg,basereg,disp,indexreg,shift,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),(reg),(indexreg),(basereg)); x86_mov_reg_memindex((inst),((reg)&0x7),((basereg)&0x7),(disp),((indexreg)&0x7),(shift),(size) == 8 ? 4 : (size)); x64_codegen_post(inst); } while (0)
+#define x64_clear_reg_size(inst,reg,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,(reg)); x86_clear_reg((inst),((reg)&0x7)); x64_codegen_post(inst); } while (0)
+//#define x64_mov_reg_imm_size(inst,reg,imm,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,(reg)); x86_mov_reg_imm((inst),((reg)&0x7),(imm)); x64_codegen_post(inst); } while (0)
+#define x64_mov_mem_imm_size(inst,mem,imm,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,0); x86_mov_mem_imm((inst),(mem),(imm),(size) == 8 ? 4 : (size)); x64_codegen_post(inst); } while (0)
+//#define x64_mov_membase_imm_size(inst,basereg,disp,imm,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,(basereg)); x86_mov_membase_imm((inst),((basereg)&0x7),(disp),(imm),(size) == 8 ? 4 : (size)); x64_codegen_post(inst); } while (0)
+#define x64_mov_memindex_imm_size(inst,basereg,disp,indexreg,shift,imm,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,(indexreg),(basereg)); x86_mov_memindex_imm((inst),((basereg)&0x7),(disp),((indexreg)&0x7),(shift),(imm),(size) == 8 ? 4 : (size)); x64_codegen_post(inst); } while (0)
+#define x64_lea_mem_size(inst,reg,mem,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,(reg)); x86_lea_mem((inst),((reg)&0x7),(mem)); x64_codegen_post(inst); } while (0)
+//#define x64_lea_membase_size(inst,reg,basereg,disp,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,(basereg)); x86_lea_membase((inst),((reg)&0x7),((basereg)&0x7),(disp)); x64_codegen_post(inst); } while (0)
+#define x64_lea_memindex_size(inst,reg,basereg,disp,indexreg,shift,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),(reg),(indexreg),(basereg)); x86_lea_memindex((inst),((reg)&0x7),((basereg)&0x7),(disp),((indexreg)&0x7),(shift)); x64_codegen_post(inst); } while (0)
+#define x64_widen_reg_size(inst,dreg,reg,is_signed,is_half,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_widen_reg((inst),((dreg)&0x7),((reg)&0x7),(is_signed),(is_half)); x64_codegen_post(inst); } while (0)
+#define x64_widen_mem_size(inst,dreg,mem,is_signed,is_half,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),(dreg),0,0); x86_widen_mem((inst),((dreg)&0x7),(mem),(is_signed),(is_half)); x64_codegen_post(inst); } while (0)
+#define x64_widen_membase_size(inst,dreg,basereg,disp,is_signed,is_half,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),(dreg),0,(basereg)); x86_widen_membase((inst),((dreg)&0x7),((basereg)&0x7),(disp),(is_signed),(is_half)); x64_codegen_post(inst); } while (0)
+#define x64_widen_memindex_size(inst,dreg,basereg,disp,indexreg,shift,is_signed,is_half,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),(dreg),(indexreg),(basereg)); x86_widen_memindex((inst),((dreg)&0x7),((basereg)&0x7),(disp),((indexreg)&0x7),(shift),(is_signed),(is_half)); x64_codegen_post(inst); } while (0)
+#define x64_cdq_size(inst,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,0); x86_cdq(inst); x64_codegen_post(inst); } while (0)
+#define x64_wait_size(inst,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,0); x86_wait(inst); x64_codegen_post(inst); } while (0)
+#define x64_fp_op_mem_size(inst,opc,mem,is_double,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),0,0,0,0); x86_fp_op_mem((inst),(opc),(mem),(is_double)); x64_codegen_post(inst); } while (0)
+#define x64_fp_op_membase_size(inst,opc,basereg,disp,is_double,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),0,0,0,(basereg)); x86_fp_op_membase((inst),(opc),((basereg)&0x7),(disp),(is_double)); x64_codegen_post(inst); } while (0)
+#define x64_fp_op_size(inst,opc,index,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),0,0,0,0); x86_fp_op((inst),(opc),(index)); x64_codegen_post(inst); } while (0)
+#define x64_fp_op_reg_size(inst,opc,index,pop_stack,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),0,0,0,0); x86_fp_op_reg((inst),(opc),(index),(pop_stack)); x64_codegen_post(inst); } while (0)
+#define x64_fp_int_op_membase_size(inst,opc,basereg,disp,is_int,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),0,0,0,(basereg)); x86_fp_int_op_membase((inst),(opc),((basereg)&0x7),(disp),(is_int)); x64_codegen_post(inst); } while (0)
+#define x64_fstp_size(inst,index,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),0,0,0,0); x86_fstp((inst),(index)); x64_codegen_post(inst); } while (0)
+#define x64_fcompp_size(inst,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),0,0,0,0); x86_fcompp(inst); x64_codegen_post(inst); } while (0)
+#define x64_fucompp_size(inst,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),0,0,0,0); x86_fucompp(inst); x64_codegen_post(inst); } while (0)
+#define x64_fnstsw_size(inst,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),0,0,0,0); x86_fnstsw(inst); x64_codegen_post(inst); } while (0)
+#define x64_fnstcw_size(inst,mem,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),0,0,0,0); x86_fnstcw((inst),(mem)); x64_codegen_post(inst); } while (0)
+#define x64_fnstcw_membase_size(inst,basereg,disp,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,(basereg)); x86_fnstcw_membase((inst),((basereg)&0x7),(disp)); x64_codegen_post(inst); } while (0)
+#define x64_fldcw_size(inst,mem,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),0,0,0,0); x86_fldcw((inst),(mem)); x64_codegen_post(inst); } while (0)
+#define x64_fldcw_membase_size(inst,basereg,disp,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),0,0,0,(basereg)); x86_fldcw_membase((inst),((basereg)&0x7),(disp)); x64_codegen_post(inst); } while (0)
+#define x64_fchs_size(inst,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),0,0,0,0); x86_fchs(inst); x64_codegen_post(inst); } while (0)
+#define x64_frem_size(inst,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),0,0,0,0); x86_frem(inst); x64_codegen_post(inst); } while (0)
+#define x64_fxch_size(inst,index,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),0,0,0,0); x86_fxch((inst),(index)); x64_codegen_post(inst); } while (0)
+#define x64_fcomi_size(inst,index,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),0,0,0,0); x86_fcomi((inst),(index)); x64_codegen_post(inst); } while (0)
+#define x64_fcomip_size(inst,index,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),0,0,0,0); x86_fcomip((inst),(index)); x64_codegen_post(inst); } while (0)
+#define x64_fucomi_size(inst,index,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),0,0,0,0); x86_fucomi((inst),(index)); x64_codegen_post(inst); } while (0)
+#define x64_fucomip_size(inst,index,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),0,0,0,0); x86_fucomip((inst),(index)); x64_codegen_post(inst); } while (0)
+#define x64_fld_size(inst,mem,is_double,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),0,0,0,0); x86_fld((inst),(mem),(is_double)); x64_codegen_post(inst); } while (0)
+//#define x64_fld_membase_size(inst,basereg,disp,is_double,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),0,0,0,(basereg)); x86_fld_membase((inst),((basereg)&0x7),(disp),(is_double)); x64_codegen_post(inst); } while (0)
+#define x64_fld80_mem_size(inst,mem,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),0,0,0,0); x86_fld80_mem((inst),(mem)); x64_codegen_post(inst); } while (0)
+#define x64_fld80_membase_size(inst,basereg,disp,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,(basereg)); x86_fld80_membase((inst),((basereg)&0x7),(disp)); x64_codegen_post(inst); } while (0)
+#define x64_fild_size(inst,mem,is_long,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),0,0,0,0); x86_fild((inst),(mem),(is_long)); x64_codegen_post(inst); } while (0)
+#define x64_fild_membase_size(inst,basereg,disp,is_long,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),0,0,0,(basereg)); x86_fild_membase((inst),((basereg)&0x7),(disp),(is_long)); x64_codegen_post(inst); } while (0)
+#define x64_fld_reg_size(inst,index,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),0,0,0,0); x86_fld_reg((inst),(index)); x64_codegen_post(inst); } while (0)
+#define x64_fldz_size(inst,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),0,0,0,0); x86_fldz(inst); x64_codegen_post(inst); } while (0)
+#define x64_fld1_size(inst,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),0,0,0,0); x86_fld1(inst); x64_codegen_post(inst); } while (0)
+#define x64_fldpi_size(inst,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),0,0,0,0); x86_fldpi(inst); x64_codegen_post(inst); } while (0)
+#define x64_fst_size(inst,mem,is_double,pop_stack,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),0,0,0,0); x86_fst((inst),(mem),(is_double),(pop_stack)); x64_codegen_post(inst); } while (0)
+#define x64_fst_membase_size(inst,basereg,disp,is_double,pop_stack,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),0,0,0,(basereg)); x86_fst_membase((inst),((basereg)&0x7),(disp),(is_double),(pop_stack)); x64_codegen_post(inst); } while (0)
+#define x64_fst80_mem_size(inst,mem,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),0,0,0,0); x86_fst80_mem((inst),(mem)); x64_codegen_post(inst); } while (0)
+#define x64_fst80_membase_size(inst,basereg,disp,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),0,0,0,(basereg)); x86_fst80_membase((inst),((basereg)&0x7),(disp)); x64_codegen_post(inst); } while (0)
+#define x64_fist_pop_size(inst,mem,is_long,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),0,0,0,0); x86_fist_pop((inst),(mem),(is_long)); x64_codegen_post(inst); } while (0)
+#define x64_fist_pop_membase_size(inst,basereg,disp,is_long,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),0,0,0,(basereg)); x86_fist_pop_membase((inst),((basereg)&0x7),(disp),(is_long)); x64_codegen_post(inst); } while (0)
+#define x64_fstsw_size(inst,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,0); x86_fstsw(inst); x64_codegen_post(inst); } while (0)
+#define x64_fist_membase_size(inst,basereg,disp,is_int,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),0,0,0,(basereg)); x86_fist_membase((inst),((basereg)&0x7),(disp),(is_int)); x64_codegen_post(inst); } while (0)
+//#define x64_push_reg_size(inst,reg,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,(reg)); x86_push_reg((inst),((reg)&0x7)); x64_codegen_post(inst); } while (0)
+#define x64_push_regp_size(inst,reg,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,(reg)); x86_push_regp((inst),((reg)&0x7)); x64_codegen_post(inst); } while (0)
+#define x64_push_mem_size(inst,mem,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,0); x86_push_mem((inst),(mem)); x64_codegen_post(inst); } while (0)
+//#define x64_push_membase_size(inst,basereg,disp,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,(basereg)); x86_push_membase((inst),((basereg)&0x7),(disp)); x64_codegen_post(inst); } while (0)
+#define x64_push_memindex_size(inst,basereg,disp,indexreg,shift,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,(indexreg),(basereg)); x86_push_memindex((inst),((basereg)&0x7),(disp),((indexreg)&0x7),(shift)); x64_codegen_post(inst); } while (0)
+#define x64_push_imm_size(inst,imm,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),0,0,0,0); x86_push_imm((inst),(imm)); x64_codegen_post(inst); } while (0)
+//#define x64_pop_reg_size(inst,reg,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,(reg)); x86_pop_reg((inst),((reg)&0x7)); x64_codegen_post(inst); } while (0)
+#define x64_pop_mem_size(inst,mem,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,0); x86_pop_mem((inst),(mem)); x64_codegen_post(inst); } while (0)
+#define x64_pop_membase_size(inst,basereg,disp,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,(basereg)); x86_pop_membase((inst),((basereg)&0x7),(disp)); x64_codegen_post(inst); } while (0)
+#define x64_pushad_size(inst,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,0); x86_pushad(inst); x64_codegen_post(inst); } while (0)
+#define x64_pushfd_size(inst,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,0); x86_pushfd(inst); x64_codegen_post(inst); } while (0)
+#define x64_popad_size(inst,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,0); x86_popad(inst); x64_codegen_post(inst); } while (0)
+#define x64_popfd_size(inst,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,0); x86_popfd(inst); x64_codegen_post(inst); } while (0)
+#define x64_loop_size(inst,imm,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,0); x86_loop((inst),(imm)); x64_codegen_post(inst); } while (0)
+#define x64_loope_size(inst,imm,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,0); x86_loope((inst),(imm)); x64_codegen_post(inst); } while (0)
+#define x64_loopne_size(inst,imm,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,0); x86_loopne((inst),(imm)); x64_codegen_post(inst); } while (0)
+#define x64_jump32_size(inst,imm,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,0); x86_jump32((inst),(imm)); x64_codegen_post(inst); } while (0)
+#define x64_jump8_size(inst,imm,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,0); x86_jump8((inst),(imm)); x64_codegen_post(inst); } while (0)
+
+#if !defined( __native_client_codegen__ )
+/* Defined above for Native Client, so they can be used in other macros */
+#define x64_jump_reg_size(inst,reg,size) do { x64_emit_rex ((inst),0,0,0,(reg)); x86_jump_reg((inst),((reg)&0x7)); } while (0)
+#define x64_jump_mem_size(inst,mem,size) do { x64_emit_rex ((inst),(size),0,0,0); x86_jump_mem((inst),(mem)); } while (0)
+#endif
+
+#define x64_jump_disp_size(inst,disp,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),0,0,0,0); x86_jump_disp((inst),(disp)); x64_codegen_post(inst); } while (0)
+#define x64_branch8_size(inst,cond,imm,is_signed,size) do { x86_branch8((inst),(cond),(imm),(is_signed)); } while (0)
+#define x64_branch32_size(inst,cond,imm,is_signed,size) do { x86_branch32((inst),(cond),(imm),(is_signed)); } while (0)
+#define x64_branch_size_body(inst,cond,target,is_signed,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,0); x86_branch((inst),(cond),(target),(is_signed)); x64_codegen_post(inst); } while (0)
+
+#if defined(__native_client_codegen__)
+#define x64_branch_size(inst,cond,target,is_signed,size) \
+ do { \
+	/* x64_branch_size_body is used twice in case */ \
+	/* the code is relocated by x64_codegen_post  */ \
+ uint8_t* branch_start; \
+ x64_codegen_pre(inst); \
+ x64_branch_size_body((inst),(cond),(target),(is_signed),(size)); \
+ inst = x64_codegen_post(inst); \
+ branch_start = inst; \
+ x64_branch_size_body((inst),(cond),(target),(is_signed),(size)); \
+ mono_x64_patch(branch_start, (target)); \
+ } while (0)
+#else
+#define x64_branch_size(inst,cond,target,is_signed,size) do { x64_branch_size_body((inst),(cond),(target),(is_signed),(size)); } while (0)
+#endif /* __native_client_codegen__ */
+
+#define x64_branch_disp_size(inst,cond,disp,is_signed,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,0); x86_branch_disp((inst),(cond),(disp),(is_signed)); x64_codegen_post(inst); } while (0)
+#define x64_set_reg_size(inst,cond,reg,is_signed,size) do { x64_codegen_pre(inst); x64_emit_rex((inst),1,0,0,(reg)); x86_set_reg((inst),(cond),((reg)&0x7),(is_signed)); x64_codegen_post(inst); } while (0)
+#define x64_set_mem_size(inst,cond,mem,is_signed,size) do { x64_codegen_pre(inst); x86_set_mem((inst),(cond),(mem),(is_signed)); x64_codegen_post(inst); } while (0)
+#define x64_set_membase_size(inst,cond,basereg,disp,is_signed,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),0,0,0,(basereg)); x86_set_membase((inst),(cond),((basereg)&0x7),(disp),(is_signed)); x64_codegen_post(inst); } while (0)
+//#define x64_call_reg_size(inst,reg,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,(reg)); x86_call_reg((inst),((reg)&0x7)); x64_codegen_post(inst); } while (0)
+#define x64_call_mem_size(inst,mem,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,0); x86_call_mem((inst),(mem)); x64_codegen_post(inst); } while (0)
+
+#if defined(__native_client_codegen__)
+/* Size is ignored for Native Client calls; we restrict jumping to 32 bits */
+#define x64_call_imm_size(inst,disp,size) \
+ do { \
+ x64_codegen_pre((inst)); \
+ x64_call_sequence_pre((inst)); \
+ x86_call_imm((inst),(disp)); \
+ x64_call_sequence_post((inst)); \
+ x64_codegen_post((inst)); \
+ } while (0)
+
+/* x86_call_code is called twice below: first so we can get the size of the */
+/* call sequence, and again so the exact offset from "inst" is used, since */
+/* the sequence could have been moved by x64_call_sequence_post. */
+/* Size is ignored for Native Client jumps; we restrict jumping to 32 bits */
+#define x64_call_code_size(inst,target,size) \
+ do { \
+ x64_codegen_pre((inst)); \
+ uint8_t* adjusted_start; \
+ uint8_t* call_start; \
+ x64_call_sequence_pre((inst)); \
+ x86_call_code((inst),(target)); \
+ adjusted_start = x64_call_sequence_post((inst)); \
+ call_start = adjusted_start; \
+ x86_call_code(adjusted_start, (target)); \
+ x64_codegen_post((inst)); \
+ mono_x64_patch(call_start, (target)); \
+ } while (0)
+
+#else
+
+#define x64_call_imm_size(inst,disp,size) do { x86_call_imm((inst),(disp)); } while (0)
+#define x64_call_code_size(inst,target,size) do { x86_call_code((inst),(target)); } while (0)
+
+#endif /*__native_client_codegen__*/
+
+//#define x64_ret_size(inst,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,0); x86_ret(inst); x64_codegen_post(inst); } while (0)
+#define x64_ret_imm_size(inst,imm,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,0); x86_ret_imm((inst),(imm)); x64_codegen_post(inst); } while (0)
+#define x64_cmov_reg_size(inst,cond,is_signed,dreg,reg,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_cmov_reg((inst),(cond),(is_signed),((dreg)&0x7),((reg)&0x7)); x64_codegen_post(inst); } while (0)
+#define x64_cmov_mem_size(inst,cond,is_signed,reg,mem,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,(reg)); x86_cmov_mem((inst),(cond),(is_signed),((reg)&0x7),(mem)); x64_codegen_post(inst); } while (0)
+#define x64_cmov_membase_size(inst,cond,is_signed,reg,basereg,disp,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,(basereg)); x86_cmov_membase((inst),(cond),(is_signed),((reg)&0x7),((basereg)&0x7),(disp)); x64_codegen_post(inst); } while (0)
+#define x64_enter_size(inst,framesize,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,0); x86_enter((inst),(framesize)); x64_codegen_post(inst); } while (0)
+//#define x64_leave_size(inst,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,0); x86_leave(inst); x64_codegen_post(inst); } while (0)
+#define x64_sahf_size(inst,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,0); x86_sahf(inst); x64_codegen_post(inst); } while (0)
+#define x64_fsin_size(inst,size) do { x64_codegen_pre(inst); x86_fsin(inst); x64_codegen_post(inst); } while (0)
+#define x64_fcos_size(inst,size) do { x64_codegen_pre(inst); x86_fcos(inst); x64_codegen_post(inst); } while (0)
+#define x64_fabs_size(inst,size) do { x64_codegen_pre(inst); x86_fabs(inst); x64_codegen_post(inst); } while (0)
+#define x64_ftst_size(inst,size) do { x64_codegen_pre(inst); x86_ftst(inst); x64_codegen_post(inst); } while (0)
+#define x64_fxam_size(inst,size) do { x64_codegen_pre(inst); x86_fxam(inst); x64_codegen_post(inst); } while (0)
+#define x64_fpatan_size(inst,size) do { x64_codegen_pre(inst); x86_fpatan(inst); x64_codegen_post(inst); } while (0)
+#define x64_fprem_size(inst,size) do { x64_codegen_pre(inst); x86_fprem(inst); x64_codegen_post(inst); } while (0)
+#define x64_fprem1_size(inst,size) do { x64_codegen_pre(inst); x86_fprem1(inst); x64_codegen_post(inst); } while (0)
+#define x64_frndint_size(inst,size) do { x64_codegen_pre(inst); x86_frndint(inst); x64_codegen_post(inst); } while (0)
+#define x64_fsqrt_size(inst,size) do { x64_codegen_pre(inst); x86_fsqrt(inst); x64_codegen_post(inst); } while (0)
+#define x64_fptan_size(inst,size) do { x64_codegen_pre(inst); x86_fptan(inst); x64_codegen_post(inst); } while (0)
+//#define x64_padding_size(inst,size) do { x64_codegen_pre(inst); x86_padding((inst),(size)); x64_codegen_post(inst); } while (0)
+#define x64_prolog_size(inst,frame_size,reg_mask,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,0); x86_prolog((inst),(frame_size),(reg_mask)); x64_codegen_post(inst); } while (0)
+#define x64_epilog_size(inst,reg_mask,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,0); x86_epilog((inst),(reg_mask)); x64_codegen_post(inst); } while (0)
+#define x64_xadd_reg_reg_size(inst,dreg,reg,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_xadd_reg_reg ((inst), (dreg), (reg), (size)); x64_codegen_post(inst); } while (0)
+#define x64_xadd_mem_reg_size(inst,mem,reg,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,(reg)); x86_xadd_mem_reg((inst),(mem),((reg)&0x7), (size)); x64_codegen_post(inst); } while (0)
+#define x64_xadd_membase_reg_size(inst,basereg,disp,reg,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),(reg),0,(basereg)); x86_xadd_membase_reg((inst),((basereg)&0x7),(disp),((reg)&0x7),(size)); x64_codegen_post(inst); } while (0)
+
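+/*
+ * Convenience aliases: each wrapper below fixes the operand size at 8
+ * bytes (the natural x86-64 operand width), so e.g. x64_inc_reg (code, 0)
+ * expands to x64_inc_reg_size (code, 0, 8) and operates on the full
+ * 64-bit register. Wrappers that still take an explicit size argument
+ * simply forward it to the corresponding _size macro.
+ */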
+#define x64_breakpoint(inst) x64_breakpoint_size(inst,8)
+#define x64_cld(inst) x64_cld_size(inst,8)
+#define x64_stosb(inst) x64_stosb_size(inst,8)
+#define x64_stosl(inst) x64_stosl_size(inst,8)
+#define x64_stosd(inst) x64_stosd_size(inst,8)
+#define x64_movsb(inst) x64_movsb_size(inst,8)
+#define x64_movsl(inst) x64_movsl_size(inst,8)
+#define x64_movsd(inst) x64_movsd_size(inst,8)
+#define x64_prefix(inst,p) x64_prefix_size(inst,p,8)
+#define x64_rdtsc(inst) x64_rdtsc_size(inst,8)
+#define x64_cmpxchg_reg_reg(inst,dreg,reg) x64_cmpxchg_reg_reg_size(inst,dreg,reg,8)
+#define x64_cmpxchg_mem_reg(inst,mem,reg) x64_cmpxchg_mem_reg_size(inst,mem,reg,8)
+#define x64_cmpxchg_membase_reg(inst,basereg,disp,reg) x64_cmpxchg_membase_reg_size(inst,basereg,disp,reg,8)
+#define x64_xchg_reg_reg(inst,dreg,reg,size) x64_xchg_reg_reg_size(inst,dreg,reg,size)
+#define x64_xchg_mem_reg(inst,mem,reg,size) x64_xchg_mem_reg_size(inst,mem,reg,size)
+#define x64_xchg_membase_reg(inst,basereg,disp,reg,size) x64_xchg_membase_reg_size(inst,basereg,disp,reg,size)
+#define x64_xadd_reg_reg(inst,dreg,reg,size) x64_xadd_reg_reg_size(inst,dreg,reg,size)
+#define x64_xadd_mem_reg(inst,mem,reg,size) x64_xadd_mem_reg_size(inst,mem,reg,size)
+#define x64_xadd_membase_reg(inst,basereg,disp,reg,size) x64_xadd_membase_reg_size(inst,basereg,disp,reg,size)
+#define x64_inc_mem(inst,mem) x64_inc_mem_size(inst,mem,8)
+#define x64_inc_membase(inst,basereg,disp) x64_inc_membase_size(inst,basereg,disp,8)
+#define x64_inc_reg(inst,reg) x64_inc_reg_size(inst,reg,8)
+#define x64_dec_mem(inst,mem) x64_dec_mem_size(inst,mem,8)
+#define x64_dec_membase(inst,basereg,disp) x64_dec_membase_size(inst,basereg,disp,8)
+#define x64_dec_reg(inst,reg) x64_dec_reg_size(inst,reg,8)
+#define x64_not_mem(inst,mem) x64_not_mem_size(inst,mem,8)
+#define x64_not_membase(inst,basereg,disp) x64_not_membase_size(inst,basereg,disp,8)
+#define x64_not_reg(inst,reg) x64_not_reg_size(inst,reg,8)
+#define x64_neg_mem(inst,mem) x64_neg_mem_size(inst,mem,8)
+#define x64_neg_membase(inst,basereg,disp) x64_neg_membase_size(inst,basereg,disp,8)
+#define x64_neg_reg(inst,reg) x64_neg_reg_size(inst,reg,8)
+#define x64_nop(inst) x64_nop_size(inst,8)
+//#define x64_alu_reg_imm(inst,opc,reg,imm) x64_alu_reg_imm_size(inst,opc,reg,imm,8)
+#define x64_alu_mem_imm(inst,opc,mem,imm) x64_alu_mem_imm_size(inst,opc,mem,imm,8)
+#define x64_alu_membase_imm(inst,opc,basereg,disp,imm) x64_alu_membase_imm_size(inst,opc,basereg,disp,imm,8)
+#define x64_alu_mem_reg(inst,opc,mem,reg) x64_alu_mem_reg_size(inst,opc,mem,reg,8)
+#define x64_alu_membase_reg(inst,opc,basereg,disp,reg) x64_alu_membase_reg_size(inst,opc,basereg,disp,reg,8)
+//#define x64_alu_reg_reg(inst,opc,dreg,reg) x64_alu_reg_reg_size(inst,opc,dreg,reg,8)
+#define x64_alu_reg8_reg8(inst,opc,dreg,reg,is_dreg_h,is_reg_h) x64_alu_reg8_reg8_size(inst,opc,dreg,reg,is_dreg_h,is_reg_h,8)
+#define x64_alu_reg_mem(inst,opc,reg,mem) x64_alu_reg_mem_size(inst,opc,reg,mem,8)
+#define x64_alu_reg_membase(inst,opc,reg,basereg,disp) x64_alu_reg_membase_size(inst,opc,reg,basereg,disp,8)
+#define x64_test_reg_imm(inst,reg,imm) x64_test_reg_imm_size(inst,reg,imm,8)
+#define x64_test_mem_imm(inst,mem,imm) x64_test_mem_imm_size(inst,mem,imm,8)
+#define x64_test_membase_imm(inst,basereg,disp,imm) x64_test_membase_imm_size(inst,basereg,disp,imm,8)
+#define x64_test_reg_reg(inst,dreg,reg) x64_test_reg_reg_size(inst,dreg,reg,8)
+#define x64_test_mem_reg(inst,mem,reg) x64_test_mem_reg_size(inst,mem,reg,8)
+#define x64_test_membase_reg(inst,basereg,disp,reg) x64_test_membase_reg_size(inst,basereg,disp,reg,8)
+#define x64_shift_reg_imm(inst,opc,reg,imm) x64_shift_reg_imm_size(inst,opc,reg,imm,8)
+#define x64_shift_mem_imm(inst,opc,mem,imm) x64_shift_mem_imm_size(inst,opc,mem,imm,8)
+#define x64_shift_membase_imm(inst,opc,basereg,disp,imm) x64_shift_membase_imm_size(inst,opc,basereg,disp,imm,8)
+#define x64_shift_reg(inst,opc,reg) x64_shift_reg_size(inst,opc,reg,8)
+#define x64_shift_mem(inst,opc,mem) x64_shift_mem_size(inst,opc,mem,8)
+#define x64_shift_membase(inst,opc,basereg,disp) x64_shift_membase_size(inst,opc,basereg,disp,8)
+#define x64_shrd_reg(inst,dreg,reg) x64_shrd_reg_size(inst,dreg,reg,8)
+#define x64_shrd_reg_imm(inst,dreg,reg,shamt) x64_shrd_reg_imm_size(inst,dreg,reg,shamt,8)
+#define x64_shld_reg(inst,dreg,reg) x64_shld_reg_size(inst,dreg,reg,8)
+#define x64_shld_reg_imm(inst,dreg,reg,shamt) x64_shld_reg_imm_size(inst,dreg,reg,shamt,8)
+#define x64_mul_reg(inst,reg,is_signed) x64_mul_reg_size(inst,reg,is_signed,8)
+#define x64_mul_mem(inst,mem,is_signed) x64_mul_mem_size(inst,mem,is_signed,8)
+#define x64_mul_membase(inst,basereg,disp,is_signed) x64_mul_membase_size(inst,basereg,disp,is_signed,8)
+#define x64_imul_reg_reg(inst,dreg,reg) x64_imul_reg_reg_size(inst,dreg,reg,8)
+#define x64_imul_reg_mem(inst,reg,mem) x64_imul_reg_mem_size(inst,reg,mem,8)
+#define x64_imul_reg_membase(inst,reg,basereg,disp) x64_imul_reg_membase_size(inst,reg,basereg,disp,8)
+#define x64_imul_reg_reg_imm(inst,dreg,reg,imm) x64_imul_reg_reg_imm_size(inst,dreg,reg,imm,8)
+#define x64_imul_reg_mem_imm(inst,reg,mem,imm) x64_imul_reg_mem_imm_size(inst,reg,mem,imm,8)
+#define x64_imul_reg_membase_imm(inst,reg,basereg,disp,imm) x64_imul_reg_membase_imm_size(inst,reg,basereg,disp,imm,8)
+#define x64_div_reg(inst,reg,is_signed) x64_div_reg_size(inst,reg,is_signed,8)
+#define x64_div_mem(inst,mem,is_signed) x64_div_mem_size(inst,mem,is_signed,8)
+#define x64_div_membase(inst,basereg,disp,is_signed) x64_div_membase_size(inst,basereg,disp,is_signed,8)
+//#define x64_mov_mem_reg(inst,mem,reg,size) x64_mov_mem_reg_size(inst,mem,reg,size)
+//#define x64_mov_regp_reg(inst,regp,reg,size) x64_mov_regp_reg_size(inst,regp,reg,size)
+//#define x64_mov_membase_reg(inst,basereg,disp,reg,size) x64_mov_membase_reg_size(inst,basereg,disp,reg,size)
+#define x64_mov_memindex_reg(inst,basereg,disp,indexreg,shift,reg,size) x64_mov_memindex_reg_size(inst,basereg,disp,indexreg,shift,reg,size)
+//#define x64_mov_reg_reg(inst,dreg,reg,size) x64_mov_reg_reg_size(inst,dreg,reg,size)
+//#define x64_mov_reg_mem(inst,reg,mem,size) x64_mov_reg_mem_size(inst,reg,mem,size)
+//#define x64_mov_reg_membase(inst,reg,basereg,disp,size) x64_mov_reg_membase_size(inst,reg,basereg,disp,size)
+#define x64_mov_reg_memindex(inst,reg,basereg,disp,indexreg,shift,size) x64_mov_reg_memindex_size(inst,reg,basereg,disp,indexreg,shift,size)
+#define x64_clear_reg(inst,reg) x64_clear_reg_size(inst,reg,8)
+//#define x64_mov_reg_imm(inst,reg,imm) x64_mov_reg_imm_size(inst,reg,imm,8)
+#define x64_mov_mem_imm(inst,mem,imm,size) x64_mov_mem_imm_size(inst,mem,imm,size)
+//#define x64_mov_membase_imm(inst,basereg,disp,imm,size) x64_mov_membase_imm_size(inst,basereg,disp,imm,size)
+#define x64_mov_memindex_imm(inst,basereg,disp,indexreg,shift,imm,size) x64_mov_memindex_imm_size(inst,basereg,disp,indexreg,shift,imm,size)
+#define x64_lea_mem(inst,reg,mem) x64_lea_mem_size(inst,reg,mem,8)
+//#define x64_lea_membase(inst,reg,basereg,disp) x64_lea_membase_size(inst,reg,basereg,disp,8)
+#define x64_lea_memindex(inst,reg,basereg,disp,indexreg,shift) x64_lea_memindex_size(inst,reg,basereg,disp,indexreg,shift,8)
+#define x64_widen_reg(inst,dreg,reg,is_signed,is_half) x64_widen_reg_size(inst,dreg,reg,is_signed,is_half,8)
+#define x64_widen_mem(inst,dreg,mem,is_signed,is_half) x64_widen_mem_size(inst,dreg,mem,is_signed,is_half,8)
+#define x64_widen_membase(inst,dreg,basereg,disp,is_signed,is_half) x64_widen_membase_size(inst,dreg,basereg,disp,is_signed,is_half,8)
+#define x64_widen_memindex(inst,dreg,basereg,disp,indexreg,shift,is_signed,is_half) x64_widen_memindex_size(inst,dreg,basereg,disp,indexreg,shift,is_signed,is_half,8)
+#define x64_cdq(inst) x64_cdq_size(inst,8)
+#define x64_wait(inst) x64_wait_size(inst,8)
+#define x64_fp_op_mem(inst,opc,mem,is_double) x64_fp_op_mem_size(inst,opc,mem,is_double,8)
+#define x64_fp_op_membase(inst,opc,basereg,disp,is_double) x64_fp_op_membase_size(inst,opc,basereg,disp,is_double,8)
+#define x64_fp_op(inst,opc,index) x64_fp_op_size(inst,opc,index,8)
+#define x64_fp_op_reg(inst,opc,index,pop_stack) x64_fp_op_reg_size(inst,opc,index,pop_stack,8)
+#define x64_fp_int_op_membase(inst,opc,basereg,disp,is_int) x64_fp_int_op_membase_size(inst,opc,basereg,disp,is_int,8)
+#define x64_fstp(inst,index) x64_fstp_size(inst,index,8)
+#define x64_fcompp(inst) x64_fcompp_size(inst,8)
+#define x64_fucompp(inst) x64_fucompp_size(inst,8)
+#define x64_fnstsw(inst) x64_fnstsw_size(inst,8)
+#define x64_fnstcw(inst,mem) x64_fnstcw_size(inst,mem,8)
+#define x64_fnstcw_membase(inst,basereg,disp) x64_fnstcw_membase_size(inst,basereg,disp,8)
+#define x64_fldcw(inst,mem) x64_fldcw_size(inst,mem,8)
+#define x64_fldcw_membase(inst,basereg,disp) x64_fldcw_membase_size(inst,basereg,disp,8)
+#define x64_fchs(inst) x64_fchs_size(inst,8)
+#define x64_frem(inst) x64_frem_size(inst,8)
+#define x64_fxch(inst,index) x64_fxch_size(inst,index,8)
+#define x64_fcomi(inst,index) x64_fcomi_size(inst,index,8)
+#define x64_fcomip(inst,index) x64_fcomip_size(inst,index,8)
+#define x64_fucomi(inst,index) x64_fucomi_size(inst,index,8)
+#define x64_fucomip(inst,index) x64_fucomip_size(inst,index,8)
+#define x64_fld(inst,mem,is_double) x64_fld_size(inst,mem,is_double,8)
+#define x64_fld_membase(inst,basereg,disp,is_double) x64_fld_membase_size(inst,basereg,disp,is_double,8)
+#define x64_fld80_mem(inst,mem) x64_fld80_mem_size(inst,mem,8)
+#define x64_fld80_membase(inst,basereg,disp) x64_fld80_membase_size(inst,basereg,disp,8)
+#define x64_fild(inst,mem,is_long) x64_fild_size(inst,mem,is_long,8)
+#define x64_fild_membase(inst,basereg,disp,is_long) x64_fild_membase_size(inst,basereg,disp,is_long,8)
+#define x64_fld_reg(inst,index) x64_fld_reg_size(inst,index,8)
+#define x64_fldz(inst) x64_fldz_size(inst,8)
+#define x64_fld1(inst) x64_fld1_size(inst,8)
+#define x64_fldpi(inst) x64_fldpi_size(inst,8)
+#define x64_fst(inst,mem,is_double,pop_stack) x64_fst_size(inst,mem,is_double,pop_stack,8)
+#define x64_fst_membase(inst,basereg,disp,is_double,pop_stack) x64_fst_membase_size(inst,basereg,disp,is_double,pop_stack,8)
+#define x64_fst80_mem(inst,mem) x64_fst80_mem_size(inst,mem,8)
+#define x64_fst80_membase(inst,basereg,disp) x64_fst80_membase_size(inst,basereg,disp,8)
+#define x64_fist_pop(inst,mem,is_long) x64_fist_pop_size(inst,mem,is_long,8)
+#define x64_fist_pop_membase(inst,basereg,disp,is_long) x64_fist_pop_membase_size(inst,basereg,disp,is_long,8)
+#define x64_fstsw(inst) x64_fstsw_size(inst,8)
+#define x64_fist_membase(inst,basereg,disp,is_int) x64_fist_membase_size(inst,basereg,disp,is_int,8)
+//#define x64_push_reg(inst,reg) x64_push_reg_size(inst,reg,8)
+#define x64_push_regp(inst,reg) x64_push_regp_size(inst,reg,8)
+#define x64_push_mem(inst,mem) x64_push_mem_size(inst,mem,8)
+//#define x64_push_membase(inst,basereg,disp) x64_push_membase_size(inst,basereg,disp,8)
+#define x64_push_memindex(inst,basereg,disp,indexreg,shift) x64_push_memindex_size(inst,basereg,disp,indexreg,shift,8)
+#define x64_push_imm(inst,imm) x64_push_imm_size(inst,imm,8)
+//#define x64_pop_reg(inst,reg) x64_pop_reg_size(inst,reg,8)
+#define x64_pop_mem(inst,mem) x64_pop_mem_size(inst,mem,8)
+#define x64_pop_membase(inst,basereg,disp) x64_pop_membase_size(inst,basereg,disp,8)
+#define x64_pushad(inst) x64_pushad_size(inst,8)
+#define x64_pushfd(inst) x64_pushfd_size(inst,8)
+#define x64_popad(inst) x64_popad_size(inst,8)
+#define x64_popfd(inst) x64_popfd_size(inst,8)
+#define x64_loop(inst,imm) x64_loop_size(inst,imm,8)
+#define x64_loope(inst,imm) x64_loope_size(inst,imm,8)
+#define x64_loopne(inst,imm) x64_loopne_size(inst,imm,8)
+#define x64_jump32(inst,imm) x64_jump32_size(inst,imm,8)
+#define x64_jump8(inst,imm) x64_jump8_size(inst,imm,8)
+#define x64_jump_reg(inst,reg) x64_jump_reg_size(inst,reg,8)
+#define x64_jump_mem(inst,mem) x64_jump_mem_size(inst,mem,8)
+#define x64_jump_membase(inst,basereg,disp) x64_jump_membase_size(inst,basereg,disp,8)
+#define x64_jump_code(inst,target) x64_jump_code_size(inst,target,8)
+#define x64_jump_disp(inst,disp) x64_jump_disp_size(inst,disp,8)
+#define x64_branch8(inst,cond,imm,is_signed) x64_branch8_size(inst,cond,imm,is_signed,8)
+#define x64_branch32(inst,cond,imm,is_signed) x64_branch32_size(inst,cond,imm,is_signed,8)
+#define x64_branch(inst,cond,target,is_signed) x64_branch_size(inst,cond,target,is_signed,8)
+#define x64_branch_disp(inst,cond,disp,is_signed) x64_branch_disp_size(inst,cond,disp,is_signed,8)
+#define x64_set_reg(inst,cond,reg,is_signed) x64_set_reg_size(inst,cond,reg,is_signed,8)
+#define x64_set_mem(inst,cond,mem,is_signed) x64_set_mem_size(inst,cond,mem,is_signed,8)
+#define x64_set_membase(inst,cond,basereg,disp,is_signed) x64_set_membase_size(inst,cond,basereg,disp,is_signed,8)
+#define x64_call_imm(inst,disp) x64_call_imm_size(inst,disp,8)
+//#define x64_call_reg(inst,reg) x64_call_reg_size(inst,reg,8)
+#define x64_call_mem(inst,mem) x64_call_mem_size(inst,mem,8)
+#define x64_call_membase(inst,basereg,disp) x64_call_membase_size(inst,basereg,disp,8)
+#define x64_call_code(inst,target) x64_call_code_size(inst,target,8)
+//#define x64_ret(inst) x64_ret_size(inst,8)
+#define x64_ret_imm(inst,imm) x64_ret_imm_size(inst,imm,8)
+#define x64_cmov_reg(inst,cond,is_signed,dreg,reg) x64_cmov_reg_size(inst,cond,is_signed,dreg,reg,8)
+#define x64_cmov_mem(inst,cond,is_signed,reg,mem) x64_cmov_mem_size(inst,cond,is_signed,reg,mem,8)
+#define x64_cmov_membase(inst,cond,is_signed,reg,basereg,disp) x64_cmov_membase_size(inst,cond,is_signed,reg,basereg,disp,8)
+#define x64_enter(inst,framesize) x64_enter_size(inst,framesize,8)
+//#define x64_leave(inst) x64_leave_size(inst,8)
+#define x64_sahf(inst) x64_sahf_size(inst,8)
+#define x64_fsin(inst) x64_fsin_size(inst,8)
+#define x64_fcos(inst) x64_fcos_size(inst,8)
+#define x64_fabs(inst) x64_fabs_size(inst,8)
+#define x64_ftst(inst) x64_ftst_size(inst,8)
+#define x64_fxam(inst) x64_fxam_size(inst,8)
+#define x64_fpatan(inst) x64_fpatan_size(inst,8)
+#define x64_fprem(inst) x64_fprem_size(inst,8)
+#define x64_fprem1(inst) x64_fprem1_size(inst,8)
+#define x64_frndint(inst) x64_frndint_size(inst,8)
+#define x64_fsqrt(inst) x64_fsqrt_size(inst,8)
+#define x64_fptan(inst) x64_fptan_size(inst,8)
+#define x64_padding(inst,size) x64_padding_size(inst,size)
+#define x64_prolog(inst,frame,reg_mask) x64_prolog_size(inst,frame,reg_mask,8)
+#define x64_epilog(inst,reg_mask) x64_epilog_size(inst,reg_mask,8)
+
+#endif // X64_H
diff --git a/lib/ffts/src/arch/x86/.gitignore b/lib/ffts/src/arch/x86/.gitignore
new file mode 100644
index 0000000..341daec
--- /dev/null
+++ b/lib/ffts/src/arch/x86/.gitignore
@@ -0,0 +1,6 @@
+/Makefile
+/Makefile.in
+/.libs
+/.deps
+/*.la
+/*.lo
diff --git a/lib/ffts/src/arch/x86/Makefile.am b/lib/ffts/src/arch/x86/Makefile.am
new file mode 100644
index 0000000..bab0f9e
--- /dev/null
+++ b/lib/ffts/src/arch/x86/Makefile.am
@@ -0,0 +1 @@
+EXTRA_DIST = x86-codegen.h
\ No newline at end of file
diff --git a/lib/ffts/src/arch/x86/x86-codegen.h b/lib/ffts/src/arch/x86/x86-codegen.h
new file mode 100644
index 0000000..0052076
--- /dev/null
+++ b/lib/ffts/src/arch/x86/x86-codegen.h
@@ -0,0 +1,2647 @@
+/*
+ * x86-codegen.h: Macros for generating x86 code
+ *
+ * Authors:
+ * Paolo Molaro (lupus@ximian.com)
+ * Intel Corporation (ORP Project)
+ * Sergey Chaban (serge@wildwestsoftware.com)
+ * Dietmar Maurer (dietmar@ximian.com)
+ * Patrik Torstensson
+ *
+ * Copyright (C) 2000 Intel Corporation. All rights reserved.
+ * Copyright (C) 2001, 2002 Ximian, Inc.
+ */
+
+#ifndef X86_H
+#define X86_H
+
+#include <assert.h>
+
+#ifdef __native_client_codegen__
+extern gint8 nacl_align_byte;
+#endif /* __native_client_codegen__ */
+
+#if defined( __native_client_codegen__ ) && defined( TARGET_X86 )
+#define x86_codegen_pre(inst_ptr_ptr, inst_len) do { mono_nacl_align_inst(inst_ptr_ptr, inst_len); } while (0)
+
+#define x86_call_sequence_pre_val(inst) guint8* _code_start = (inst);
+#define x86_call_sequence_post_val(inst) \
+ (mono_nacl_align_call(&_code_start, &(inst)), _code_start);
+#define x86_call_sequence_pre(inst) x86_call_sequence_pre_val((inst))
+#define x86_call_sequence_post(inst) x86_call_sequence_post_val((inst))
+#else
+#define x86_codegen_pre(inst_ptr_ptr, inst_len) do {} while (0)
+
+/* Two variants are needed to avoid warnings */
+#define x86_call_sequence_pre_val(inst) guint8* _code_start = (inst);
+#define x86_call_sequence_post_val(inst) _code_start
+#define x86_call_sequence_pre(inst)
+#define x86_call_sequence_post(inst)
+#endif /* __native_client_codegen__ */
+
+/* x86 32bit register numbers */
+typedef enum {
+ X86_EAX = 0,
+ X86_ECX = 1,
+ X86_EDX = 2,
+ X86_EBX = 3,
+ X86_ESP = 4,
+ X86_EBP = 5,
+ X86_ESI = 6,
+ X86_EDI = 7,
+ X86_NREG
+} X86_Reg_No;
+
+typedef enum {
+ X86_XMM0,
+ X86_XMM1,
+ X86_XMM2,
+ X86_XMM3,
+ X86_XMM4,
+ X86_XMM5,
+ X86_XMM6,
+ X86_XMM7,
+ X86_XMM_NREG
+} X86_XMM_Reg_No;
+
+/* opcodes for ALU instructions */
+typedef enum {
+ X86_ADD = 0,
+ X86_OR = 1,
+ X86_ADC = 2,
+ X86_SBB = 3,
+ X86_AND = 4,
+ X86_SUB = 5,
+ X86_XOR = 6,
+ X86_CMP = 7,
+ X86_NALU
+} X86_ALU_Opcode;
+/*
+// opcodes for shift instructions
+*/
+typedef enum {
+ X86_SHLD,
+ X86_SHLR,
+ X86_ROL = 0,
+ X86_ROR = 1,
+ X86_RCL = 2,
+ X86_RCR = 3,
+ X86_SHL = 4,
+ X86_SHR = 5,
+ X86_SAR = 7,
+ X86_NSHIFT = 8
+} X86_Shift_Opcode;
+/*
+// opcodes for floating-point instructions
+*/
+typedef enum {
+ X86_FADD = 0,
+ X86_FMUL = 1,
+ X86_FCOM = 2,
+ X86_FCOMP = 3,
+ X86_FSUB = 4,
+ X86_FSUBR = 5,
+ X86_FDIV = 6,
+ X86_FDIVR = 7,
+ X86_NFP = 8
+} X86_FP_Opcode;
+/*
+// integer conditions codes
+*/
+typedef enum {
+ X86_CC_EQ = 0, X86_CC_E = 0, X86_CC_Z = 0,
+ X86_CC_NE = 1, X86_CC_NZ = 1,
+ X86_CC_LT = 2, X86_CC_B = 2, X86_CC_C = 2, X86_CC_NAE = 2,
+ X86_CC_LE = 3, X86_CC_BE = 3, X86_CC_NA = 3,
+ X86_CC_GT = 4, X86_CC_A = 4, X86_CC_NBE = 4,
+ X86_CC_GE = 5, X86_CC_AE = 5, X86_CC_NB = 5, X86_CC_NC = 5,
+ X86_CC_LZ = 6, X86_CC_S = 6,
+ X86_CC_GEZ = 7, X86_CC_NS = 7,
+ X86_CC_P = 8, X86_CC_PE = 8,
+ X86_CC_NP = 9, X86_CC_PO = 9,
+ X86_CC_O = 10,
+ X86_CC_NO = 11,
+ X86_NCC
+} X86_CC;
+
+/* FP status */
+enum {
+ X86_FP_C0 = 0x100,
+ X86_FP_C1 = 0x200,
+ X86_FP_C2 = 0x400,
+ X86_FP_C3 = 0x4000,
+ X86_FP_CC_MASK = 0x4500
+};
+
+/* FP control word */
+enum {
+ X86_FPCW_INVOPEX_MASK = 0x1,
+ X86_FPCW_DENOPEX_MASK = 0x2,
+ X86_FPCW_ZERODIV_MASK = 0x4,
+ X86_FPCW_OVFEX_MASK = 0x8,
+ X86_FPCW_UNDFEX_MASK = 0x10,
+ X86_FPCW_PRECEX_MASK = 0x20,
+ X86_FPCW_PRECC_MASK = 0x300,
+ X86_FPCW_ROUNDC_MASK = 0xc00,
+
+ /* values for precision control */
+ X86_FPCW_PREC_SINGLE = 0,
+ X86_FPCW_PREC_DOUBLE = 0x200,
+ X86_FPCW_PREC_EXTENDED = 0x300,
+
+ /* values for rounding control */
+ X86_FPCW_ROUND_NEAREST = 0,
+ X86_FPCW_ROUND_DOWN = 0x400,
+ X86_FPCW_ROUND_UP = 0x800,
+ X86_FPCW_ROUND_TOZERO = 0xc00
+};
+
+/*
+// prefix code
+*/
+typedef enum {
+ X86_LOCK_PREFIX = 0xF0,
+ X86_REPNZ_PREFIX = 0xF2,
+ X86_REPZ_PREFIX = 0xF3,
+ X86_REP_PREFIX = 0xF3,
+ X86_CS_PREFIX = 0x2E,
+ X86_SS_PREFIX = 0x36,
+ X86_DS_PREFIX = 0x3E,
+ X86_ES_PREFIX = 0x26,
+ X86_FS_PREFIX = 0x64,
+ X86_GS_PREFIX = 0x65,
+ X86_UNLIKELY_PREFIX = 0x2E,
+ X86_LIKELY_PREFIX = 0x3E,
+ X86_OPERAND_PREFIX = 0x66,
+ X86_ADDRESS_PREFIX = 0x67
+} X86_Prefix;
+
+static const unsigned char
+x86_cc_unsigned_map [X86_NCC] = {
+ 0x74, /* eq */
+ 0x75, /* ne */
+ 0x72, /* lt */
+ 0x76, /* le */
+ 0x77, /* gt */
+ 0x73, /* ge */
+ 0x78, /* lz */
+ 0x79, /* gez */
+ 0x7a, /* p */
+ 0x7b, /* np */
+ 0x70, /* o */
+ 0x71, /* no */
+};
+
+static const unsigned char
+x86_cc_signed_map [X86_NCC] = {
+ 0x74, /* eq */
+ 0x75, /* ne */
+ 0x7c, /* lt */
+ 0x7e, /* le */
+ 0x7f, /* gt */
+ 0x7d, /* ge */
+ 0x78, /* lz */
+ 0x79, /* gez */
+ 0x7a, /* p */
+ 0x7b, /* np */
+ 0x70, /* o */
+ 0x71, /* no */
+};
+
+typedef union {
+ int val;
+ unsigned char b [4];
+} x86_imm_buf;
+
+#define X86_NOBASEREG (-1)
+
+/*
+// bitvector mask for callee-saved registers
+*/
+#define X86_ESI_MASK (1<<X86_ESI)
+#define X86_EDI_MASK (1<<X86_EDI)
+#define X86_EBX_MASK (1<<X86_EBX)
+#define X86_EBP_MASK (1<<X86_EBP)
+
+#define X86_CALLEE_REGS ((1<<X86_EAX) | (1<<X86_ECX) | (1<<X86_EDX))
+#define X86_CALLER_REGS ((1<<X86_EBX) | (1<<X86_EBP) | (1<<X86_ESI) | (1<<X86_EDI))
+#define X86_BYTE_REGS ((1<<X86_EAX) | (1<<X86_ECX) | (1<<X86_EDX) | (1<<X86_EBX))
+
+#define X86_IS_SCRATCH(reg) (X86_CALLEE_REGS & (1 << (reg))) /* X86_EAX, X86_ECX, or X86_EDX */
+#define X86_IS_CALLEE(reg) (X86_CALLER_REGS & (1 << (reg))) /* X86_ESI, X86_EDI, X86_EBX, or X86_EBP */
+
+#define X86_IS_BYTE_REG(reg) ((reg) < 4)
+
+/*
+// Frame structure:
+//
+// +--------------------------------+
+// | in_arg[0] = var[0] |
+// | in_arg[1] = var[1] |
+// | . . . |
+// | in_arg[n_arg-1] = var[n_arg-1] |
+// +--------------------------------+
+// | return IP |
+// +--------------------------------+
+// | saved EBP | <-- frame pointer (EBP)
+// +--------------------------------+
+// | ... | n_extra
+// +--------------------------------+
+// | var[n_arg] |
+// | var[n_arg+1] | local variables area
+// | . . . |
+// | var[n_var-1] |
+// +--------------------------------+
+// | |
+// | |
+// | spill area | area for spilling mimic stack
+// | |
+// +--------------------------------+
+// | ebx |
+// | ebp [ESP_Frame only] |
+// | esi | 0..3 callee-saved regs
+// | edi | <-- stack pointer (ESP)
+// +--------------------------------+
+// | stk0 |
+// | stk1 | operand stack area/
+// | . . . | out args
+// | stkn-1 |
+// +--------------------------------+
+//
+//
+*/
+
+
+/*
+ * useful building blocks
+ */
+#define x86_modrm_mod(modrm) ((modrm) >> 6)
+#define x86_modrm_reg(modrm) (((modrm) >> 3) & 0x7)
+#define x86_modrm_rm(modrm) ((modrm) & 0x7)
+
+#define x86_address_byte(inst,m,o,r) do { *(inst)++ = ((((m)&0x03)<<6)|(((o)&0x07)<<3)|(((r)&0x07))); } while (0)
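+/*
+ * For illustration (assuming "inst" is an unsigned char* cursor into a
+ * code buffer, as the macros require): x86_address_byte (inst, 1, 0, 5)
+ * emits the single ModRM byte 0x45, which x86_modrm_mod, x86_modrm_reg
+ * and x86_modrm_rm decompose back into mod = 1, reg = 0, rm = 5.
+ */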
+#define x86_imm_emit32(inst,imm) \
+ do { \
+ x86_imm_buf imb; imb.val = (int) (imm); \
+ *(inst)++ = imb.b [0]; \
+ *(inst)++ = imb.b [1]; \
+ *(inst)++ = imb.b [2]; \
+ *(inst)++ = imb.b [3]; \
+ } while (0)
+#define x86_imm_emit16(inst,imm) do { *(short*)(inst) = (imm); (inst) += 2; } while (0)
+#define x86_imm_emit8(inst,imm) do { *(inst) = (unsigned char)((imm) & 0xff); ++(inst); } while (0)
+#define x86_is_imm8(imm) (((int)(imm) >= -128 && (int)(imm) <= 127))
+#define x86_is_imm16(imm) (((int)(imm) >= -(1<<16) && (int)(imm) <= ((1<<16)-1)))
+
+#define x86_reg_emit(inst,r,regno) do { x86_address_byte ((inst), 3, (r), (regno)); } while (0)
+#define x86_reg8_emit(inst,r,regno,is_rh,is_rnoh) do {x86_address_byte ((inst), 3, (is_rh)?((r)|4):(r), (is_rnoh)?((regno)|4):(regno));} while (0)
+#define x86_regp_emit(inst,r,regno) do { x86_address_byte ((inst), 0, (r), (regno)); } while (0)
+#define x86_mem_emit(inst,r,disp) do { x86_address_byte ((inst), 0, (r), 5); x86_imm_emit32((inst), (disp)); } while (0)
+
+#define kMaxMembaseEmitPadding 6
+
+#define x86_membase_emit_body(inst,r,basereg,disp) do {\
+ if ((basereg) == X86_ESP) { \
+ if ((disp) == 0) { \
+ x86_address_byte ((inst), 0, (r), X86_ESP); \
+ x86_address_byte ((inst), 0, X86_ESP, X86_ESP); \
+ } else if (x86_is_imm8((disp))) { \
+ x86_address_byte ((inst), 1, (r), X86_ESP); \
+ x86_address_byte ((inst), 0, X86_ESP, X86_ESP); \
+ x86_imm_emit8 ((inst), (disp)); \
+ } else { \
+ x86_address_byte ((inst), 2, (r), X86_ESP); \
+ x86_address_byte ((inst), 0, X86_ESP, X86_ESP); \
+ x86_imm_emit32 ((inst), (disp)); \
+ } \
+ break; \
+ } \
+ if ((disp) == 0 && (basereg) != X86_EBP) { \
+ x86_address_byte ((inst), 0, (r), (basereg)); \
+ break; \
+ } \
+ if (x86_is_imm8((disp))) { \
+ x86_address_byte ((inst), 1, (r), (basereg)); \
+ x86_imm_emit8 ((inst), (disp)); \
+ } else { \
+ x86_address_byte ((inst), 2, (r), (basereg)); \
+ x86_imm_emit32 ((inst), (disp)); \
+ } \
+ } while (0)
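+/*
+ * Illustrative encodings produced by x86_membase_emit_body (shown here
+ * with a hypothetical preceding 0xff /0 opcode byte, i.e. inc):
+ *   [EAX+8] -> ff 40 08   (mod=1 ModRM plus an 8-bit displacement)
+ *   [ESP]   -> ff 04 24   (ESP as a base always needs a SIB byte)
+ */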
+
+#if defined(__native_client_codegen__) && defined(TARGET_AMD64)
+#define x86_membase_emit(inst,r,basereg,disp) \
+ do { \
+ x64_nacl_membase_handler(&(inst), (basereg), (disp), (r)) ; \
+ } while (0)
+#else /* __default_codegen__ || 32-bit NaCl codegen */
+#define x86_membase_emit(inst,r,basereg,disp) \
+ do { \
+ x86_membase_emit_body((inst),(r),(basereg),(disp)); \
+ } while (0)
+#endif
+
+#define kMaxMemindexEmitPadding 6
+
+#define x86_memindex_emit(inst,r,basereg,disp,indexreg,shift) \
+ do { \
+ if ((basereg) == X86_NOBASEREG) { \
+ x86_address_byte ((inst), 0, (r), 4); \
+ x86_address_byte ((inst), (shift), (indexreg), 5); \
+ x86_imm_emit32 ((inst), (disp)); \
+ } else if ((disp) == 0 && (basereg) != X86_EBP) { \
+ x86_address_byte ((inst), 0, (r), 4); \
+ x86_address_byte ((inst), (shift), (indexreg), (basereg)); \
+ } else if (x86_is_imm8((disp))) { \
+ x86_address_byte ((inst), 1, (r), 4); \
+ x86_address_byte ((inst), (shift), (indexreg), (basereg)); \
+ x86_imm_emit8 ((inst), (disp)); \
+ } else { \
+ x86_address_byte ((inst), 2, (r), 4); \
+ x86_address_byte ((inst), (shift), (indexreg), (basereg)); \
+ x86_imm_emit32 ((inst), (disp)); \
+ } \
+ } while (0)
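+/*
+ * Example: with a hypothetical 0xff /0 (inc) opcode in front,
+ * x86_memindex_emit (inst, 0, X86_EAX, 16, X86_ECX, 2) encodes
+ * inc dword [eax+ecx*4+16] as ff 44 88 10 (ModRM, SIB, disp8).
+ */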
+
+/*
+ * target is the position in the code to jump to:
+ * target = code;
+ * .. output loop code...
+ * x86_mov_reg_imm (code, X86_EAX, 0);
+ * loop = code;
+ * x86_loop (code, -1);
+ * ... finish method
+ *
+ * patch the displacement afterwards:
+ * x86_patch (loop, target);
+ *
+ * ins should point at the start of the instruction that encodes a target.
+ * The instruction is inspected for validity and the correct displacement
+ * is inserted.
+ */
+#define x86_do_patch(ins,target) \
+ do { \
+ unsigned char* pos = (ins) + 1; \
+ int disp, size = 0; \
+ switch (*(unsigned char*)(ins)) { \
+ case 0xe8: case 0xe9: ++size; break; /* call, jump32 */ \
+ case 0x0f: if (!(*pos >= 0x70 && *pos <= 0x8f)) assert (0); \
+ ++size; ++pos; break; /* prefix for 32-bit disp */ \
+ case 0xe0: case 0xe1: case 0xe2: /* loop */ \
+ case 0xeb: /* jump8 */ \
+ /* conditional jump opcodes */ \
+ case 0x70: case 0x71: case 0x72: case 0x73: \
+ case 0x74: case 0x75: case 0x76: case 0x77: \
+ case 0x78: case 0x79: case 0x7a: case 0x7b: \
+ case 0x7c: case 0x7d: case 0x7e: case 0x7f: \
+ break; \
+ default: assert (0); \
+ } \
+ disp = (target) - pos; \
+ if (size) x86_imm_emit32 (pos, disp - 4); \
+ else if (x86_is_imm8 (disp - 1)) x86_imm_emit8 (pos, disp - 1); \
+ else assert (0); \
+ } while (0)
+
+#if defined( __native_client_codegen__ ) && defined(TARGET_X86)
+
+#define x86_skip_nops(inst) \
+ do { \
+ int in_nop = 0; \
+ do { \
+ in_nop = 0; \
+ if (inst[0] == 0x90) { \
+ in_nop = 1; \
+ inst += 1; \
+ } \
+ if (inst[0] == 0x8b && inst[1] == 0xc0) { \
+ in_nop = 1; \
+ inst += 2; \
+ } \
+ if (inst[0] == 0x8d && inst[1] == 0x6d \
+ && inst[2] == 0x00) { \
+ in_nop = 1; \
+ inst += 3; \
+ } \
+ if (inst[0] == 0x8d && inst[1] == 0x64 \
+ && inst[2] == 0x24 && inst[3] == 0x00) { \
+ in_nop = 1; \
+ inst += 4; \
+ } \
+ /* skip inst+=5 case because it's the 4-byte + 1-byte case */ \
+ if (inst[0] == 0x8d && inst[1] == 0xad \
+ && inst[2] == 0x00 && inst[3] == 0x00 \
+ && inst[4] == 0x00 && inst[5] == 0x00) { \
+ in_nop = 1; \
+ inst += 6; \
+ } \
+ if (inst[0] == 0x8d && inst[1] == 0xa4 \
+ && inst[2] == 0x24 && inst[3] == 0x00 \
+ && inst[4] == 0x00 && inst[5] == 0x00 \
+ && inst[6] == 0x00 ) { \
+ in_nop = 1; \
+ inst += 7; \
+ } \
+ } while ( in_nop ); \
+ } while (0)
+
+#if defined(__native_client__)
+#define x86_patch(ins,target) \
+ do { \
+ unsigned char* inst = (ins); \
+ guint8* new_target = nacl_modify_patch_target((target)); \
+ x86_skip_nops((inst)); \
+ x86_do_patch((inst), new_target); \
+ } while (0)
+#else /* __native_client__ */
+#define x86_patch(ins,target) \
+ do { \
+ unsigned char* inst = (ins); \
+ guint8* new_target = (target); \
+ x86_skip_nops((inst)); \
+ x86_do_patch((inst), new_target); \
+ } while (0)
+#endif /* __native_client__ */
+
+#else
+#define x86_patch(ins,target) do { x86_do_patch((ins), (target)); } while (0)
+#endif /* __native_client_codegen__ */
+
+#ifdef __native_client_codegen__
+/* The breakpoint instruction is illegal in Native Client, although the HALT */
+/* instruction is allowed. The breakpoint is used in several places in */
+/* mini-x86.c and exceptions-x86.c. */
+#define x86_breakpoint(inst) \
+ do { \
+ *(inst)++ = 0xf4; \
+ } while (0)
+#else
+#define x86_breakpoint(inst) \
+ do { \
+ *(inst)++ = 0xcc; \
+ } while (0)
+#endif
+
+#define x86_cld(inst) do { *(inst)++ =(unsigned char)0xfc; } while (0)
+#define x86_stosb(inst) do { *(inst)++ =(unsigned char)0xaa; } while (0)
+#define x86_stosl(inst) do { *(inst)++ =(unsigned char)0xab; } while (0)
+#define x86_stosd(inst) x86_stosl((inst))
+#define x86_movsb(inst) do { *(inst)++ =(unsigned char)0xa4; } while (0)
+#define x86_movsl(inst) do { *(inst)++ =(unsigned char)0xa5; } while (0)
+#define x86_movsd(inst) x86_movsl((inst))
+
+#if defined(__native_client_codegen__)
+#if defined(TARGET_X86)
+/* kNaClAlignment - 1 is the max value we can pass into x86_codegen_pre. */
+/* This keeps us from having to call x86_codegen_pre with specific */
+/* knowledge of the size of the instruction that follows it, and */
+/* localizes the alignment requirement to this spot. */
+#define x86_prefix(inst,p) \
+ do { \
+ x86_codegen_pre(&(inst), kNaClAlignment - 1); \
+ *(inst)++ =(unsigned char) (p); \
+ } while (0)
+#elif defined(TARGET_AMD64)
+/* We need to tag any prefixes so we can perform proper membase sandboxing */
+/* See: mini-amd64.c:amd64_nacl_membase_handler for verbose details */
+#define x86_prefix(inst,p) \
+ do { \
+ x64_nacl_tag_legacy_prefix((inst)); \
+ *(inst)++ =(unsigned char) (p); \
+ } while (0)
+
+#endif /* TARGET_AMD64 */
+
+#else
+#define x86_prefix(inst,p) \
+ do { \
+ *(inst)++ =(unsigned char) (p); \
+ } while (0)
+#endif /* __native_client_codegen__ */
+
+#define x86_rdtsc(inst) \
+ do { \
+ x86_codegen_pre(&(inst), 2); \
+ *(inst)++ = 0x0f; \
+ *(inst)++ = 0x31; \
+ } while (0)
+
+#define x86_cmpxchg_reg_reg(inst,dreg,reg) \
+ do { \
+ x86_codegen_pre(&(inst), 3); \
+ *(inst)++ = (unsigned char)0x0f; \
+ *(inst)++ = (unsigned char)0xb1; \
+ x86_reg_emit ((inst), (reg), (dreg)); \
+ } while (0)
+
+#define x86_cmpxchg_mem_reg(inst,mem,reg) \
+ do { \
+ x86_codegen_pre(&(inst), 7); \
+ *(inst)++ = (unsigned char)0x0f; \
+ *(inst)++ = (unsigned char)0xb1; \
+ x86_mem_emit ((inst), (reg), (mem)); \
+ } while (0)
+
+#define x86_cmpxchg_membase_reg(inst,basereg,disp,reg) \
+ do { \
+ x86_codegen_pre(&(inst), 2 + kMaxMembaseEmitPadding); \
+ *(inst)++ = (unsigned char)0x0f; \
+ *(inst)++ = (unsigned char)0xb1; \
+ x86_membase_emit ((inst), (reg), (basereg), (disp)); \
+ } while (0)
+
+#define x86_xchg_reg_reg(inst,dreg,reg,size) \
+ do { \
+ x86_codegen_pre(&(inst), 2); \
+ if ((size) == 1) \
+ *(inst)++ = (unsigned char)0x86; \
+ else \
+ *(inst)++ = (unsigned char)0x87; \
+ x86_reg_emit ((inst), (reg), (dreg)); \
+ } while (0)
+
+#define x86_xchg_mem_reg(inst,mem,reg,size) \
+ do { \
+ x86_codegen_pre(&(inst), 6); \
+ if ((size) == 1) \
+ *(inst)++ = (unsigned char)0x86; \
+ else \
+ *(inst)++ = (unsigned char)0x87; \
+ x86_mem_emit ((inst), (reg), (mem)); \
+ } while (0)
+
+#define x86_xchg_membase_reg(inst,basereg,disp,reg,size) \
+ do { \
+ x86_codegen_pre(&(inst), 1 + kMaxMembaseEmitPadding); \
+ if ((size) == 1) \
+ *(inst)++ = (unsigned char)0x86; \
+ else \
+ *(inst)++ = (unsigned char)0x87; \
+ x86_membase_emit ((inst), (reg), (basereg), (disp)); \
+ } while (0)
+
+#define x86_xadd_reg_reg(inst,dreg,reg,size) \
+ do { \
+ x86_codegen_pre(&(inst), 3); \
+ *(inst)++ = (unsigned char)0x0F; \
+ if ((size) == 1) \
+ *(inst)++ = (unsigned char)0xC0; \
+ else \
+ *(inst)++ = (unsigned char)0xC1; \
+ x86_reg_emit ((inst), (reg), (dreg)); \
+ } while (0)
+
+#define x86_xadd_mem_reg(inst,mem,reg,size) \
+ do { \
+ x86_codegen_pre(&(inst), 7); \
+ *(inst)++ = (unsigned char)0x0F; \
+ if ((size) == 1) \
+ *(inst)++ = (unsigned char)0xC0; \
+ else \
+ *(inst)++ = (unsigned char)0xC1; \
+ x86_mem_emit ((inst), (reg), (mem)); \
+ } while (0)
+
+#define x86_xadd_membase_reg(inst,basereg,disp,reg,size) \
+ do { \
+ x86_codegen_pre(&(inst), 2 + kMaxMembaseEmitPadding); \
+ *(inst)++ = (unsigned char)0x0F; \
+ if ((size) == 1) \
+ *(inst)++ = (unsigned char)0xC0; \
+ else \
+ *(inst)++ = (unsigned char)0xC1; \
+ x86_membase_emit ((inst), (reg), (basereg), (disp)); \
+ } while (0)
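+/*
+ * The xadd family is typically combined with a LOCK prefix for an atomic
+ * read-modify-write; a minimal sketch ("code" being the usual cursor):
+ *   x86_prefix (code, X86_LOCK_PREFIX);
+ *   x86_xadd_membase_reg (code, X86_EBX, 0, X86_EAX, 4);
+ * emits f0 0f c1 03, i.e. lock xadd [ebx], eax.
+ */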
+
+#define x86_inc_mem(inst,mem) \
+ do { \
+ x86_codegen_pre(&(inst), 6); \
+ *(inst)++ = (unsigned char)0xff; \
+ x86_mem_emit ((inst), 0, (mem)); \
+ } while (0)
+
+#define x86_inc_membase(inst,basereg,disp) \
+ do { \
+ x86_codegen_pre(&(inst), 1 + kMaxMembaseEmitPadding); \
+ *(inst)++ = (unsigned char)0xff; \
+ x86_membase_emit ((inst), 0, (basereg), (disp)); \
+ } while (0)
+
+#define x86_inc_reg(inst,reg) do { *(inst)++ = (unsigned char)0x40 + (reg); } while (0)
+
+#define x86_dec_mem(inst,mem) \
+ do { \
+ x86_codegen_pre(&(inst), 6); \
+ *(inst)++ = (unsigned char)0xff; \
+ x86_mem_emit ((inst), 1, (mem)); \
+ } while (0)
+
+#define x86_dec_membase(inst,basereg,disp) \
+ do { \
+ x86_codegen_pre(&(inst), 1 + kMaxMembaseEmitPadding); \
+ *(inst)++ = (unsigned char)0xff; \
+ x86_membase_emit ((inst), 1, (basereg), (disp)); \
+ } while (0)
+
+#define x86_dec_reg(inst,reg) do { *(inst)++ = (unsigned char)0x48 + (reg); } while (0)
+
+#define x86_not_mem(inst,mem) \
+ do { \
+ x86_codegen_pre(&(inst), 6); \
+ *(inst)++ = (unsigned char)0xf7; \
+ x86_mem_emit ((inst), 2, (mem)); \
+ } while (0)
+
+#define x86_not_membase(inst,basereg,disp) \
+ do { \
+ x86_codegen_pre(&(inst), 1 + kMaxMembaseEmitPadding); \
+ *(inst)++ = (unsigned char)0xf7; \
+ x86_membase_emit ((inst), 2, (basereg), (disp)); \
+ } while (0)
+
+#define x86_not_reg(inst,reg) \
+ do { \
+ x86_codegen_pre(&(inst), 2); \
+ *(inst)++ = (unsigned char)0xf7; \
+ x86_reg_emit ((inst), 2, (reg)); \
+ } while (0)
+
+#define x86_neg_mem(inst,mem) \
+ do { \
+ x86_codegen_pre(&(inst), 6); \
+ *(inst)++ = (unsigned char)0xf7; \
+ x86_mem_emit ((inst), 3, (mem)); \
+ } while (0)
+
+#define x86_neg_membase(inst,basereg,disp) \
+ do { \
+ x86_codegen_pre(&(inst), 1 + kMaxMembaseEmitPadding); \
+ *(inst)++ = (unsigned char)0xf7; \
+ x86_membase_emit ((inst), 3, (basereg), (disp)); \
+ } while (0)
+
+#define x86_neg_reg(inst,reg) \
+ do { \
+ x86_codegen_pre(&(inst), 2); \
+ *(inst)++ = (unsigned char)0xf7; \
+ x86_reg_emit ((inst), 3, (reg)); \
+ } while (0)
+
+#define x86_nop(inst) do { *(inst)++ = (unsigned char)0x90; } while (0)
+
+#define x86_alu_reg_imm(inst,opc,reg,imm) \
+ do { \
+ if ((reg) == X86_EAX) { \
+ x86_codegen_pre(&(inst), 5); \
+ *(inst)++ = (((unsigned char)(opc)) << 3) + 5; \
+ x86_imm_emit32 ((inst), (imm)); \
+ break; \
+ } \
+ if (x86_is_imm8((imm))) { \
+ x86_codegen_pre(&(inst), 3); \
+ *(inst)++ = (unsigned char)0x83; \
+ x86_reg_emit ((inst), (opc), (reg)); \
+ x86_imm_emit8 ((inst), (imm)); \
+ } else { \
+ x86_codegen_pre(&(inst), 6); \
+ *(inst)++ = (unsigned char)0x81; \
+ x86_reg_emit ((inst), (opc), (reg)); \
+ x86_imm_emit32 ((inst), (imm)); \
+ } \
+ } while (0)
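+/*
+ * Example (illustrative, "code" being an unsigned char* cursor):
+ * x86_alu_reg_imm (code, X86_ADD, X86_ECX, 1) takes the sign-extended
+ * imm8 form and emits 83 c1 01 (add ecx, 1), while an EAX destination
+ * uses the short accumulator form, e.g. 05 <imm32> for add eax, imm.
+ */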
+
+#define x86_alu_mem_imm(inst,opc,mem,imm) \
+ do { \
+ if (x86_is_imm8((imm))) { \
+ x86_codegen_pre(&(inst), 7); \
+ *(inst)++ = (unsigned char)0x83; \
+ x86_mem_emit ((inst), (opc), (mem)); \
+ x86_imm_emit8 ((inst), (imm)); \
+ } else { \
+ x86_codegen_pre(&(inst), 10); \
+ *(inst)++ = (unsigned char)0x81; \
+ x86_mem_emit ((inst), (opc), (mem)); \
+ x86_imm_emit32 ((inst), (imm)); \
+ } \
+ } while (0)
+
+#define x86_alu_membase_imm(inst,opc,basereg,disp,imm) \
+ do { \
+ if (x86_is_imm8((imm))) { \
+ x86_codegen_pre(&(inst), 2 + kMaxMembaseEmitPadding); \
+ *(inst)++ = (unsigned char)0x83; \
+ x86_membase_emit ((inst), (opc), (basereg), (disp)); \
+ x86_imm_emit8 ((inst), (imm)); \
+ } else { \
+ x86_codegen_pre(&(inst), 5 + kMaxMembaseEmitPadding); \
+ *(inst)++ = (unsigned char)0x81; \
+ x86_membase_emit ((inst), (opc), (basereg), (disp)); \
+ x86_imm_emit32 ((inst), (imm)); \
+ } \
+ } while (0)
+
+#define x86_alu_membase8_imm(inst,opc,basereg,disp,imm) \
+ do { \
+ x86_codegen_pre(&(inst), 2 + kMaxMembaseEmitPadding); \
+ *(inst)++ = (unsigned char)0x80; \
+ x86_membase_emit ((inst), (opc), (basereg), (disp)); \
+ x86_imm_emit8 ((inst), (imm)); \
+ } while (0)
+
+#define x86_alu_mem_reg(inst,opc,mem,reg) \
+ do { \
+ x86_codegen_pre(&(inst), 6); \
+ *(inst)++ = (((unsigned char)(opc)) << 3) + 1; \
+ x86_mem_emit ((inst), (reg), (mem)); \
+ } while (0)
+
+#define x86_alu_membase_reg(inst,opc,basereg,disp,reg) \
+ do { \
+ x86_codegen_pre(&(inst), 1 + kMaxMembaseEmitPadding); \
+ *(inst)++ = (((unsigned char)(opc)) << 3) + 1; \
+ x86_membase_emit ((inst), (reg), (basereg), (disp)); \
+ } while (0)
+
+#define x86_alu_reg_reg(inst,opc,dreg,reg) \
+ do { \
+ x86_codegen_pre(&(inst), 2); \
+ *(inst)++ = (((unsigned char)(opc)) << 3) + 3; \
+ x86_reg_emit ((inst), (dreg), (reg)); \
+ } while (0)
+
+/**
+ * @x86_alu_reg8_reg8:
+ * Supports ALU operations between two 8-bit registers.
+ * dreg := dreg opc reg
+ * The X86_Reg_No enum is used to specify the registers.
+ * Additionally, the is_*_h flags specify which part of a given
+ * 32-bit register is used - high (TRUE) or low (FALSE).
+ * For example: dreg = X86_EAX, is_dreg_h = TRUE -> use AH
+ */
+#define x86_alu_reg8_reg8(inst,opc,dreg,reg,is_dreg_h,is_reg_h) \
+ do { \
+ x86_codegen_pre(&(inst), 2); \
+ *(inst)++ = (((unsigned char)(opc)) << 3) + 2; \
+ x86_reg8_emit ((inst), (dreg), (reg), (is_dreg_h), (is_reg_h)); \
+ } while (0)
+
+#define x86_alu_reg_mem(inst,opc,reg,mem) \
+ do { \
+ x86_codegen_pre(&(inst), 6); \
+ *(inst)++ = (((unsigned char)(opc)) << 3) + 3; \
+ x86_mem_emit ((inst), (reg), (mem)); \
+ } while (0)
+
+#define x86_alu_reg_membase(inst,opc,reg,basereg,disp) \
+ do { \
+ x86_codegen_pre(&(inst), 1 + kMaxMembaseEmitPadding); \
+ *(inst)++ = (((unsigned char)(opc)) << 3) + 3; \
+ x86_membase_emit ((inst), (reg), (basereg), (disp)); \
+ } while (0)
+
+#define x86_test_reg_imm(inst,reg,imm) \
+ do { \
+ x86_codegen_pre(&(inst), 6); \
+ if ((reg) == X86_EAX) { \
+ *(inst)++ = (unsigned char)0xa9; \
+ } else { \
+ *(inst)++ = (unsigned char)0xf7; \
+ x86_reg_emit ((inst), 0, (reg)); \
+ } \
+ x86_imm_emit32 ((inst), (imm)); \
+ } while (0)
+
+#define x86_test_mem_imm8(inst,mem,imm) \
+ do { \
+ x86_codegen_pre(&(inst), 7); \
+ *(inst)++ = (unsigned char)0xf6; \
+ x86_mem_emit ((inst), 0, (mem)); \
+ x86_imm_emit8 ((inst), (imm)); \
+ } while (0)
+
+#define x86_test_mem_imm(inst,mem,imm) \
+ do { \
+ x86_codegen_pre(&(inst), 10); \
+ *(inst)++ = (unsigned char)0xf7; \
+ x86_mem_emit ((inst), 0, (mem)); \
+ x86_imm_emit32 ((inst), (imm)); \
+ } while (0)
+
+#define x86_test_membase_imm(inst,basereg,disp,imm) \
+ do { \
+ x86_codegen_pre(&(inst), 5 + kMaxMembaseEmitPadding); \
+ *(inst)++ = (unsigned char)0xf7; \
+ x86_membase_emit ((inst), 0, (basereg), (disp)); \
+ x86_imm_emit32 ((inst), (imm)); \
+ } while (0)
+
+#define x86_test_reg_reg(inst,dreg,reg) \
+ do { \
+ x86_codegen_pre(&(inst), 2); \
+ *(inst)++ = (unsigned char)0x85; \
+ x86_reg_emit ((inst), (reg), (dreg)); \
+ } while (0)
+
+#define x86_test_mem_reg(inst,mem,reg) \
+ do { \
+ x86_codegen_pre(&(inst), 6); \
+ *(inst)++ = (unsigned char)0x85; \
+ x86_mem_emit ((inst), (reg), (mem)); \
+ } while (0)
+
+#define x86_test_membase_reg(inst,basereg,disp,reg) \
+ do { \
+ x86_codegen_pre(&(inst), 1 + kMaxMembaseEmitPadding); \
+ *(inst)++ = (unsigned char)0x85; \
+ x86_membase_emit ((inst), (reg), (basereg), (disp)); \
+ } while (0)
+
+#define x86_shift_reg_imm(inst,opc,reg,imm) \
+ do { \
+ if ((imm) == 1) { \
+ x86_codegen_pre(&(inst), 2); \
+ *(inst)++ = (unsigned char)0xd1; \
+ x86_reg_emit ((inst), (opc), (reg)); \
+ } else { \
+ x86_codegen_pre(&(inst), 3); \
+ *(inst)++ = (unsigned char)0xc1; \
+ x86_reg_emit ((inst), (opc), (reg)); \
+ x86_imm_emit8 ((inst), (imm)); \
+ } \
+ } while (0)
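+/*
+ * Example: x86_shift_reg_imm (code, X86_SHL, X86_EAX, 3) emits
+ * c1 e0 03 (shl eax, 3); a shift count of 1 selects the shorter
+ * d1 e0 encoding instead.
+ */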
+
+#define x86_shift_mem_imm(inst,opc,mem,imm) \
+ do { \
+ if ((imm) == 1) { \
+ x86_codegen_pre(&(inst), 6); \
+ *(inst)++ = (unsigned char)0xd1; \
+ x86_mem_emit ((inst), (opc), (mem)); \
+ } else { \
+ x86_codegen_pre(&(inst), 7); \
+ *(inst)++ = (unsigned char)0xc1; \
+ x86_mem_emit ((inst), (opc), (mem)); \
+ x86_imm_emit8 ((inst), (imm)); \
+ } \
+ } while (0)
+
+#define x86_shift_membase_imm(inst,opc,basereg,disp,imm) \
+ do { \
+ if ((imm) == 1) { \
+ x86_codegen_pre(&(inst), 1 + kMaxMembaseEmitPadding); \
+ *(inst)++ = (unsigned char)0xd1; \
+ x86_membase_emit ((inst), (opc), (basereg), (disp)); \
+ } else { \
+ x86_codegen_pre(&(inst), 2 + kMaxMembaseEmitPadding); \
+ *(inst)++ = (unsigned char)0xc1; \
+ x86_membase_emit ((inst), (opc), (basereg), (disp)); \
+ x86_imm_emit8 ((inst), (imm)); \
+ } \
+ } while (0)
+
+#define x86_shift_reg(inst,opc,reg) \
+ do { \
+ x86_codegen_pre(&(inst), 2); \
+ *(inst)++ = (unsigned char)0xd3; \
+ x86_reg_emit ((inst), (opc), (reg)); \
+ } while (0)
+
+#define x86_shift_mem(inst,opc,mem) \
+ do { \
+ x86_codegen_pre(&(inst), 6); \
+ *(inst)++ = (unsigned char)0xd3; \
+ x86_mem_emit ((inst), (opc), (mem)); \
+ } while (0)
+
+#define x86_shift_membase(inst,opc,basereg,disp) \
+ do { \
+ x86_codegen_pre(&(inst), 1 + kMaxMembaseEmitPadding); \
+ *(inst)++ = (unsigned char)0xd3; \
+ x86_membase_emit ((inst), (opc), (basereg), (disp)); \
+ } while (0)
+
+/*
+ * Multi op shift missing.
+ */
+
+#define x86_shrd_reg(inst,dreg,reg) \
+ do { \
+ x86_codegen_pre(&(inst), 3); \
+ *(inst)++ = (unsigned char)0x0f; \
+ *(inst)++ = (unsigned char)0xad; \
+ x86_reg_emit ((inst), (reg), (dreg)); \
+ } while (0)
+
+#define x86_shrd_reg_imm(inst,dreg,reg,shamt) \
+ do { \
+ x86_codegen_pre(&(inst), 4); \
+ *(inst)++ = (unsigned char)0x0f; \
+ *(inst)++ = (unsigned char)0xac; \
+ x86_reg_emit ((inst), (reg), (dreg)); \
+ x86_imm_emit8 ((inst), (shamt)); \
+ } while (0)
+
+#define x86_shld_reg(inst,dreg,reg) \
+ do { \
+ x86_codegen_pre(&(inst), 3); \
+ *(inst)++ = (unsigned char)0x0f; \
+ *(inst)++ = (unsigned char)0xa5; \
+ x86_reg_emit ((inst), (reg), (dreg)); \
+ } while (0)
+
+#define x86_shld_reg_imm(inst,dreg,reg,shamt) \
+ do { \
+ x86_codegen_pre(&(inst), 4); \
+ *(inst)++ = (unsigned char)0x0f; \
+ *(inst)++ = (unsigned char)0xa4; \
+ x86_reg_emit ((inst), (reg), (dreg)); \
+ x86_imm_emit8 ((inst), (shamt)); \
+ } while (0)
+
+/*
+ * EDX:EAX = EAX * rm
+ */
+#define x86_mul_reg(inst,reg,is_signed) \
+ do { \
+ x86_codegen_pre(&(inst), 2); \
+ *(inst)++ = (unsigned char)0xf7; \
+ x86_reg_emit ((inst), 4 + ((is_signed) ? 1 : 0), (reg)); \
+ } while (0)
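+/*
+ * Example: x86_mul_reg (code, X86_ECX, 0) emits f7 e1 (mul ecx,
+ * unsigned), while is_signed selects the /5 form f7 e9 (imul ecx).
+ */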
+
+#define x86_mul_mem(inst,mem,is_signed) \
+ do { \
+ x86_codegen_pre(&(inst), 6); \
+ *(inst)++ = (unsigned char)0xf7; \
+ x86_mem_emit ((inst), 4 + ((is_signed) ? 1 : 0), (mem)); \
+ } while (0)
+
+#define x86_mul_membase(inst,basereg,disp,is_signed) \
+ do { \
+ x86_codegen_pre(&(inst), 1 + kMaxMembaseEmitPadding); \
+ *(inst)++ = (unsigned char)0xf7; \
+ x86_membase_emit ((inst), 4 + ((is_signed) ? 1 : 0), (basereg), (disp)); \
+ } while (0)
+
+/*
+ * r *= rm
+ */
+#define x86_imul_reg_reg(inst,dreg,reg) \
+ do { \
+ x86_codegen_pre(&(inst), 3); \
+ *(inst)++ = (unsigned char)0x0f; \
+ *(inst)++ = (unsigned char)0xaf; \
+ x86_reg_emit ((inst), (dreg), (reg)); \
+ } while (0)
+
+#define x86_imul_reg_mem(inst,reg,mem) \
+ do { \
+ x86_codegen_pre(&(inst), 7); \
+ *(inst)++ = (unsigned char)0x0f; \
+ *(inst)++ = (unsigned char)0xaf; \
+ x86_mem_emit ((inst), (reg), (mem)); \
+ } while (0)
+
+#define x86_imul_reg_membase(inst,reg,basereg,disp) \
+ do { \
+ x86_codegen_pre(&(inst), 2 + kMaxMembaseEmitPadding); \
+ *(inst)++ = (unsigned char)0x0f; \
+ *(inst)++ = (unsigned char)0xaf; \
+ x86_membase_emit ((inst), (reg), (basereg), (disp)); \
+ } while (0)
+
+/*
+ * dreg = rm * imm
+ */
+#define x86_imul_reg_reg_imm(inst,dreg,reg,imm) \
+ do { \
+ if (x86_is_imm8 ((imm))) { \
+ x86_codegen_pre(&(inst), 3); \
+ *(inst)++ = (unsigned char)0x6b; \
+ x86_reg_emit ((inst), (dreg), (reg)); \
+ x86_imm_emit8 ((inst), (imm)); \
+ } else { \
+ x86_codegen_pre(&(inst), 6); \
+ *(inst)++ = (unsigned char)0x69; \
+ x86_reg_emit ((inst), (dreg), (reg)); \
+ x86_imm_emit32 ((inst), (imm)); \
+ } \
+ } while (0)
+
+#define x86_imul_reg_mem_imm(inst,reg,mem,imm) \
+ do { \
+ if (x86_is_imm8 ((imm))) { \
+ x86_codegen_pre(&(inst), 7); \
+ *(inst)++ = (unsigned char)0x6b; \
+ x86_mem_emit ((inst), (reg), (mem)); \
+ x86_imm_emit8 ((inst), (imm)); \
+ } else { \
+		x86_codegen_pre(&(inst), 10); \
+ *(inst)++ = (unsigned char)0x69; \
+ x86_mem_emit ((inst), (reg), (mem)); \
+ x86_imm_emit32 ((inst), (imm)); \
+ } \
+ } while (0)
+
+#define x86_imul_reg_membase_imm(inst,reg,basereg,disp,imm) \
+ do { \
+ if (x86_is_imm8 ((imm))) { \
+ x86_codegen_pre(&(inst), 2 + kMaxMembaseEmitPadding); \
+ *(inst)++ = (unsigned char)0x6b; \
+ x86_membase_emit ((inst), (reg), (basereg), (disp)); \
+ x86_imm_emit8 ((inst), (imm)); \
+ } else { \
+ x86_codegen_pre(&(inst), 5 + kMaxMembaseEmitPadding); \
+ *(inst)++ = (unsigned char)0x69; \
+ x86_membase_emit ((inst), (reg), (basereg), (disp)); \
+ x86_imm_emit32 ((inst), (imm)); \
+ } \
+ } while (0)
+
+/*
+ * divide EDX:EAX by rm;
+ * eax = quotient, edx = remainder
+ */
+
+#define x86_div_reg(inst,reg,is_signed) \
+ do { \
+ x86_codegen_pre(&(inst), 2); \
+ *(inst)++ = (unsigned char)0xf7; \
+ x86_reg_emit ((inst), 6 + ((is_signed) ? 1 : 0), (reg)); \
+ } while (0)
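+/*
+ * Example: x86_div_reg (code, X86_EBX, 1) emits f7 fb (idiv ebx),
+ * leaving the quotient in EAX and the remainder in EDX.
+ */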
+
+#define x86_div_mem(inst,mem,is_signed) \
+ do { \
+ x86_codegen_pre(&(inst), 6); \
+ *(inst)++ = (unsigned char)0xf7; \
+ x86_mem_emit ((inst), 6 + ((is_signed) ? 1 : 0), (mem)); \
+ } while (0)
+
+#define x86_div_membase(inst,basereg,disp,is_signed) \
+ do { \
+ x86_codegen_pre(&(inst), 1 + kMaxMembaseEmitPadding); \
+ *(inst)++ = (unsigned char)0xf7; \
+ x86_membase_emit ((inst), 6 + ((is_signed) ? 1 : 0), (basereg), (disp)); \
+ } while (0)
+
+#define x86_mov_mem_reg(inst,mem,reg,size) \
+ do { \
+ x86_codegen_pre(&(inst), 7); \
+ switch ((size)) { \
+ case 1: *(inst)++ = (unsigned char)0x88; break; \
+ case 2: x86_prefix((inst), X86_OPERAND_PREFIX); /* fall through */ \
+ case 4: *(inst)++ = (unsigned char)0x89; break; \
+ default: assert (0); \
+ } \
+ x86_mem_emit ((inst), (reg), (mem)); \
+ } while (0)
+
+#define x86_mov_regp_reg(inst,regp,reg,size) \
+ do { \
+ x86_codegen_pre(&(inst), 3); \
+ switch ((size)) { \
+ case 1: *(inst)++ = (unsigned char)0x88; break; \
+ case 2: x86_prefix((inst), X86_OPERAND_PREFIX); /* fall through */ \
+ case 4: *(inst)++ = (unsigned char)0x89; break; \
+ default: assert (0); \
+ } \
+ x86_regp_emit ((inst), (reg), (regp)); \
+ } while (0)
+
+#define x86_mov_membase_reg(inst,basereg,disp,reg,size) \
+ do { \
+ x86_codegen_pre(&(inst), 2 + kMaxMembaseEmitPadding); \
+ switch ((size)) { \
+ case 1: *(inst)++ = (unsigned char)0x88; break; \
+ case 2: x86_prefix((inst), X86_OPERAND_PREFIX); /* fall through */ \
+ case 4: *(inst)++ = (unsigned char)0x89; break; \
+ default: assert (0); \
+ } \
+ x86_membase_emit ((inst), (reg), (basereg), (disp)); \
+ } while (0)
+
+#define x86_mov_memindex_reg(inst,basereg,disp,indexreg,shift,reg,size) \
+ do { \
+ x86_codegen_pre(&(inst), 2 + kMaxMemindexEmitPadding); \
+ switch ((size)) { \
+ case 1: *(inst)++ = (unsigned char)0x88; break; \
+ case 2: x86_prefix((inst), X86_OPERAND_PREFIX); /* fall through */ \
+ case 4: *(inst)++ = (unsigned char)0x89; break; \
+ default: assert (0); \
+ } \
+ x86_memindex_emit ((inst), (reg), (basereg), (disp), (indexreg), (shift)); \
+ } while (0)
+
+#define x86_mov_reg_reg(inst,dreg,reg,size) \
+ do { \
+ x86_codegen_pre(&(inst), 3); \
+ switch ((size)) { \
+ case 1: *(inst)++ = (unsigned char)0x8a; break; \
+ case 2: x86_prefix((inst), X86_OPERAND_PREFIX); /* fall through */ \
+ case 4: *(inst)++ = (unsigned char)0x8b; break; \
+ default: assert (0); \
+ } \
+ x86_reg_emit ((inst), (dreg), (reg)); \
+ } while (0)
+
+#define x86_mov_reg_mem(inst,reg,mem,size) \
+ do { \
+ x86_codegen_pre(&(inst), 7); \
+ switch ((size)) { \
+ case 1: *(inst)++ = (unsigned char)0x8a; break; \
+ case 2: x86_prefix((inst), X86_OPERAND_PREFIX); /* fall through */ \
+ case 4: *(inst)++ = (unsigned char)0x8b; break; \
+ default: assert (0); \
+ } \
+ x86_mem_emit ((inst), (reg), (mem)); \
+ } while (0)
+
+#define kMovRegMembasePadding (2 + kMaxMembaseEmitPadding)
+
+#define x86_mov_reg_membase(inst,reg,basereg,disp,size) \
+ do { \
+ x86_codegen_pre(&(inst), kMovRegMembasePadding); \
+ switch ((size)) { \
+ case 1: *(inst)++ = (unsigned char)0x8a; break; \
+ case 2: x86_prefix((inst), X86_OPERAND_PREFIX); /* fall through */ \
+ case 4: *(inst)++ = (unsigned char)0x8b; break; \
+ default: assert (0); \
+ } \
+ x86_membase_emit ((inst), (reg), (basereg), (disp)); \
+ } while (0)
+
+#define x86_mov_reg_memindex(inst,reg,basereg,disp,indexreg,shift,size) \
+ do { \
+ x86_codegen_pre(&(inst), 2 + kMaxMemindexEmitPadding); \
+ switch ((size)) { \
+ case 1: *(inst)++ = (unsigned char)0x8a; break; \
+ case 2: x86_prefix((inst), X86_OPERAND_PREFIX); /* fall through */ \
+ case 4: *(inst)++ = (unsigned char)0x8b; break; \
+ default: assert (0); \
+ } \
+ x86_memindex_emit ((inst), (reg), (basereg), (disp), (indexreg), (shift)); \
+ } while (0)
+
+/*
+ * Note: x86_clear_reg () changes the condition codes!
+ */
+#define x86_clear_reg(inst,reg) x86_alu_reg_reg((inst), X86_XOR, (reg), (reg))
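+/* Example: x86_clear_reg (code, X86_EAX) emits 33 c0 (xor eax, eax). */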
+
+#define x86_mov_reg_imm(inst,reg,imm) \
+ do { \
+ x86_codegen_pre(&(inst), 5); \
+ *(inst)++ = (unsigned char)0xb8 + (reg); \
+ x86_imm_emit32 ((inst), (imm)); \
+ } while (0)
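+/*
+ * Example: x86_mov_reg_imm (code, X86_EDI, 0x1234) emits
+ * bf 34 12 00 00 (mov edi, 0x1234) - one opcode byte encoding the
+ * register plus a little-endian imm32.
+ */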
+
+#define x86_mov_mem_imm(inst,mem,imm,size) \
+ do { \
+ if ((size) == 1) { \
+ x86_codegen_pre(&(inst), 7); \
+ *(inst)++ = (unsigned char)0xc6; \
+ x86_mem_emit ((inst), 0, (mem)); \
+ x86_imm_emit8 ((inst), (imm)); \
+ } else if ((size) == 2) { \
+ x86_codegen_pre(&(inst), 9); \
+ x86_prefix((inst), X86_OPERAND_PREFIX); \
+ *(inst)++ = (unsigned char)0xc7; \
+ x86_mem_emit ((inst), 0, (mem)); \
+ x86_imm_emit16 ((inst), (imm)); \
+ } else { \
+ x86_codegen_pre(&(inst), 10); \
+ *(inst)++ = (unsigned char)0xc7; \
+ x86_mem_emit ((inst), 0, (mem)); \
+ x86_imm_emit32 ((inst), (imm)); \
+ } \
+ } while (0)
+
+#define x86_mov_membase_imm(inst,basereg,disp,imm,size) \
+ do { \
+ if ((size) == 1) { \
+ x86_codegen_pre(&(inst), 2 + kMaxMembaseEmitPadding); \
+ *(inst)++ = (unsigned char)0xc6; \
+ x86_membase_emit ((inst), 0, (basereg), (disp)); \
+ x86_imm_emit8 ((inst), (imm)); \
+ } else if ((size) == 2) { \
+ x86_codegen_pre(&(inst), 4 + kMaxMembaseEmitPadding); \
+ x86_prefix((inst), X86_OPERAND_PREFIX); \
+ *(inst)++ = (unsigned char)0xc7; \
+ x86_membase_emit ((inst), 0, (basereg), (disp)); \
+ x86_imm_emit16 ((inst), (imm)); \
+ } else { \
+ x86_codegen_pre(&(inst), 5 + kMaxMembaseEmitPadding); \
+ *(inst)++ = (unsigned char)0xc7; \
+ x86_membase_emit ((inst), 0, (basereg), (disp)); \
+ x86_imm_emit32 ((inst), (imm)); \
+ } \
+ } while (0)
+
+#define x86_mov_memindex_imm(inst,basereg,disp,indexreg,shift,imm,size) \
+ do { \
+ if ((size) == 1) { \
+ x86_codegen_pre(&(inst), 2 + kMaxMemindexEmitPadding); \
+ *(inst)++ = (unsigned char)0xc6; \
+ x86_memindex_emit ((inst), 0, (basereg), (disp), (indexreg), (shift)); \
+ x86_imm_emit8 ((inst), (imm)); \
+ } else if ((size) == 2) { \
+ x86_codegen_pre(&(inst), 4 + kMaxMemindexEmitPadding); \
+ x86_prefix((inst), X86_OPERAND_PREFIX); \
+ *(inst)++ = (unsigned char)0xc7; \
+ x86_memindex_emit ((inst), 0, (basereg), (disp), (indexreg), (shift)); \
+ x86_imm_emit16 ((inst), (imm)); \
+ } else { \
+ x86_codegen_pre(&(inst), 5 + kMaxMemindexEmitPadding); \
+ *(inst)++ = (unsigned char)0xc7; \
+ x86_memindex_emit ((inst), 0, (basereg), (disp), (indexreg), (shift)); \
+ x86_imm_emit32 ((inst), (imm)); \
+ } \
+ } while (0)
+
+#define x86_lea_mem(inst,reg,mem) \
+ do { \
+ x86_codegen_pre(&(inst), 5); \
+ *(inst)++ = (unsigned char)0x8d; \
+ x86_mem_emit ((inst), (reg), (mem)); \
+ } while (0)
+
+#define x86_lea_membase(inst,reg,basereg,disp) \
+ do { \
+ x86_codegen_pre(&(inst), 1 + kMaxMembaseEmitPadding); \
+ *(inst)++ = (unsigned char)0x8d; \
+ x86_membase_emit ((inst), (reg), (basereg), (disp)); \
+ } while (0)
+
+#define x86_lea_memindex(inst,reg,basereg,disp,indexreg,shift) \
+ do { \
+ x86_codegen_pre(&(inst), 1 + kMaxMemindexEmitPadding); \
+ *(inst)++ = (unsigned char)0x8d; \
+ x86_memindex_emit ((inst), (reg), (basereg), (disp), (indexreg), (shift)); \
+ } while (0)
+
+#define x86_widen_reg(inst,dreg,reg,is_signed,is_half) \
+ do { \
+ unsigned char op = 0xb6; \
+ g_assert (is_half || X86_IS_BYTE_REG (reg)); \
+ x86_codegen_pre(&(inst), 3); \
+ *(inst)++ = (unsigned char)0x0f; \
+ if ((is_signed)) op += 0x08; \
+ if ((is_half)) op += 0x01; \
+ *(inst)++ = op; \
+ x86_reg_emit ((inst), (dreg), (reg)); \
+ } while (0)
+
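+/*
+ * Note: the widen macros derive the second opcode byte from 0xb6
+ * (movzx r32, r/m8): +0x01 selects a 16-bit source (movzx r32, r/m16)
+ * and +0x08 selects sign extension (movsx).  Usage sketch:
+ * x86_widen_reg ((ip), X86_EAX, X86_ECX, 1, 1) emits 0f bf c1,
+ * "movsx eax, cx".
+ */
+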
+#define x86_widen_mem(inst,dreg,mem,is_signed,is_half) \
+ do { \
+ unsigned char op = 0xb6; \
+ x86_codegen_pre(&(inst), 7); \
+ *(inst)++ = (unsigned char)0x0f; \
+ if ((is_signed)) op += 0x08; \
+ if ((is_half)) op += 0x01; \
+ *(inst)++ = op; \
+ x86_mem_emit ((inst), (dreg), (mem)); \
+ } while (0)
+
+#define x86_widen_membase(inst,dreg,basereg,disp,is_signed,is_half) \
+ do { \
+ unsigned char op = 0xb6; \
+ x86_codegen_pre(&(inst), 2 + kMaxMembaseEmitPadding); \
+ *(inst)++ = (unsigned char)0x0f; \
+ if ((is_signed)) op += 0x08; \
+ if ((is_half)) op += 0x01; \
+ *(inst)++ = op; \
+ x86_membase_emit ((inst), (dreg), (basereg), (disp)); \
+ } while (0)
+
+#define x86_widen_memindex(inst,dreg,basereg,disp,indexreg,shift,is_signed,is_half) \
+ do { \
+ unsigned char op = 0xb6; \
+ x86_codegen_pre(&(inst), 2 + kMaxMemindexEmitPadding); \
+ *(inst)++ = (unsigned char)0x0f; \
+ if ((is_signed)) op += 0x08; \
+ if ((is_half)) op += 0x01; \
+ *(inst)++ = op; \
+ x86_memindex_emit ((inst), (dreg), (basereg), (disp), (indexreg), (shift)); \
+ } while (0)
+
+#define x86_cdq(inst) do { *(inst)++ = (unsigned char)0x99; } while (0)
+#define x86_wait(inst) do { *(inst)++ = (unsigned char)0x9b; } while (0)
+
+#define x86_fp_op_mem(inst,opc,mem,is_double) \
+ do { \
+ x86_codegen_pre(&(inst), 6); \
+ *(inst)++ = (is_double) ? (unsigned char)0xdc : (unsigned char)0xd8; \
+ x86_mem_emit ((inst), (opc), (mem)); \
+ } while (0)
+
+#define x86_fp_op_membase(inst,opc,basereg,disp,is_double) \
+ do { \
+ x86_codegen_pre(&(inst), 1 + kMaxMembaseEmitPadding); \
+ *(inst)++ = (is_double) ? (unsigned char)0xdc : (unsigned char)0xd8; \
+ x86_membase_emit ((inst), (opc), (basereg), (disp)); \
+ } while (0)
+
+#define x86_fp_op(inst,opc,index) \
+ do { \
+ x86_codegen_pre(&(inst), 2); \
+ *(inst)++ = (unsigned char)0xd8; \
+ *(inst)++ = (unsigned char)0xc0+((opc)<<3)+((index)&0x07); \
+ } while (0)
+
+#define x86_fp_op_reg(inst,opc,index,pop_stack) \
+ do { \
+ static const unsigned char map[] = { 0, 1, 2, 3, 5, 4, 7, 6, 8}; \
+ x86_codegen_pre(&(inst), 2); \
+ *(inst)++ = (pop_stack) ? (unsigned char)0xde : (unsigned char)0xdc; \
+ *(inst)++ = (unsigned char)0xc0+(map[(opc)]<<3)+((index)&0x07); \
+ } while (0)
+
+/**
+ * @x86_fp_int_op_membase
+ * Supports FPU operations between ST(0) and an integer operand in memory.
+ * The operation is encoded using the X86_FP_Opcode enum.
+ * The operand is addressed by [basereg + disp].
+ * is_int specifies whether the operand is int32 (TRUE) or int16 (FALSE).
+ */
+#define x86_fp_int_op_membase(inst,opc,basereg,disp,is_int) \
+ do { \
+ x86_codegen_pre(&(inst), 1 + kMaxMembaseEmitPadding); \
+ *(inst)++ = (is_int) ? (unsigned char)0xda : (unsigned char)0xde; \
+		x86_membase_emit ((inst), (opc), (basereg), (disp));	\
+ } while (0)
+
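+/*
+ * Usage sketch (assuming X86_FADD == 0 in the X86_FP_Opcode enum
+ * referenced above): x86_fp_int_op_membase ((ip), X86_FADD, X86_EBP, 8, 1)
+ * emits da 45 08, "fiadd dword ptr [ebp+8]".
+ */
+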
+#define x86_fstp(inst,index) \
+ do { \
+ x86_codegen_pre(&(inst), 2); \
+ *(inst)++ = (unsigned char)0xdd; \
+ *(inst)++ = (unsigned char)0xd8+(index); \
+ } while (0)
+
+#define x86_fcompp(inst) \
+ do { \
+ x86_codegen_pre(&(inst), 2); \
+ *(inst)++ = (unsigned char)0xde; \
+ *(inst)++ = (unsigned char)0xd9; \
+ } while (0)
+
+#define x86_fucompp(inst) \
+ do { \
+ x86_codegen_pre(&(inst), 2); \
+ *(inst)++ = (unsigned char)0xda; \
+ *(inst)++ = (unsigned char)0xe9; \
+ } while (0)
+
+#define x86_fnstsw(inst) \
+ do { \
+ x86_codegen_pre(&(inst), 2); \
+ *(inst)++ = (unsigned char)0xdf; \
+ *(inst)++ = (unsigned char)0xe0; \
+ } while (0)
+
+#define x86_fnstcw(inst,mem) \
+ do { \
+ x86_codegen_pre(&(inst), 6); \
+ *(inst)++ = (unsigned char)0xd9; \
+ x86_mem_emit ((inst), 7, (mem)); \
+ } while (0)
+
+#define x86_fnstcw_membase(inst,basereg,disp) \
+ do { \
+ x86_codegen_pre(&(inst), 1 + kMaxMembaseEmitPadding); \
+ *(inst)++ = (unsigned char)0xd9; \
+ x86_membase_emit ((inst), 7, (basereg), (disp)); \
+ } while (0)
+
+#define x86_fldcw(inst,mem) \
+ do { \
+ x86_codegen_pre(&(inst), 6); \
+ *(inst)++ = (unsigned char)0xd9; \
+ x86_mem_emit ((inst), 5, (mem)); \
+ } while (0)
+
+#define x86_fldcw_membase(inst,basereg,disp) \
+ do { \
+ x86_codegen_pre(&(inst), 1 + kMaxMembaseEmitPadding); \
+ *(inst)++ = (unsigned char)0xd9; \
+ x86_membase_emit ((inst), 5, (basereg), (disp)); \
+ } while (0)
+
+#define x86_fchs(inst) \
+ do { \
+ x86_codegen_pre(&(inst), 2); \
+ *(inst)++ = (unsigned char)0xd9; \
+ *(inst)++ = (unsigned char)0xe0; \
+ } while (0)
+
+#define x86_frem(inst) \
+ do { \
+ x86_codegen_pre(&(inst), 2); \
+ *(inst)++ = (unsigned char)0xd9; \
+ *(inst)++ = (unsigned char)0xf8; \
+ } while (0)
+
+#define x86_fxch(inst,index) \
+ do { \
+ x86_codegen_pre(&(inst), 2); \
+ *(inst)++ = (unsigned char)0xd9; \
+ *(inst)++ = (unsigned char)0xc8 + ((index) & 0x07); \
+ } while (0)
+
+#define x86_fcomi(inst,index) \
+ do { \
+ x86_codegen_pre(&(inst), 2); \
+ *(inst)++ = (unsigned char)0xdb; \
+ *(inst)++ = (unsigned char)0xf0 + ((index) & 0x07); \
+ } while (0)
+
+#define x86_fcomip(inst,index) \
+ do { \
+ x86_codegen_pre(&(inst), 2); \
+ *(inst)++ = (unsigned char)0xdf; \
+ *(inst)++ = (unsigned char)0xf0 + ((index) & 0x07); \
+ } while (0)
+
+#define x86_fucomi(inst,index) \
+ do { \
+ x86_codegen_pre(&(inst), 2); \
+ *(inst)++ = (unsigned char)0xdb; \
+ *(inst)++ = (unsigned char)0xe8 + ((index) & 0x07); \
+ } while (0)
+
+#define x86_fucomip(inst,index) \
+ do { \
+ x86_codegen_pre(&(inst), 2); \
+ *(inst)++ = (unsigned char)0xdf; \
+ *(inst)++ = (unsigned char)0xe8 + ((index) & 0x07); \
+ } while (0)
+
+#define x86_fld(inst,mem,is_double) \
+ do { \
+ x86_codegen_pre(&(inst), 6); \
+ *(inst)++ = (is_double) ? (unsigned char)0xdd : (unsigned char)0xd9; \
+ x86_mem_emit ((inst), 0, (mem)); \
+ } while (0)
+
+#define x86_fld_membase(inst,basereg,disp,is_double) \
+ do { \
+ x86_codegen_pre(&(inst), 1 + kMaxMembaseEmitPadding); \
+ *(inst)++ = (is_double) ? (unsigned char)0xdd : (unsigned char)0xd9; \
+ x86_membase_emit ((inst), 0, (basereg), (disp)); \
+ } while (0)
+
+#define x86_fld80_mem(inst,mem) \
+ do { \
+ x86_codegen_pre(&(inst), 6); \
+ *(inst)++ = (unsigned char)0xdb; \
+ x86_mem_emit ((inst), 5, (mem)); \
+ } while (0)
+
+#define x86_fld80_membase(inst,basereg,disp) \
+ do { \
+ x86_codegen_pre(&(inst), 1 + kMaxMembaseEmitPadding); \
+ *(inst)++ = (unsigned char)0xdb; \
+ x86_membase_emit ((inst), 5, (basereg), (disp)); \
+ } while (0)
+
+#define x86_fild(inst,mem,is_long) \
+ do { \
+ x86_codegen_pre(&(inst), 6); \
+ if ((is_long)) { \
+ *(inst)++ = (unsigned char)0xdf; \
+ x86_mem_emit ((inst), 5, (mem)); \
+ } else { \
+ *(inst)++ = (unsigned char)0xdb; \
+ x86_mem_emit ((inst), 0, (mem)); \
+ } \
+ } while (0)
+
+#define x86_fild_membase(inst,basereg,disp,is_long) \
+ do { \
+ x86_codegen_pre(&(inst), 1 + kMaxMembaseEmitPadding); \
+ if ((is_long)) { \
+ *(inst)++ = (unsigned char)0xdf; \
+ x86_membase_emit ((inst), 5, (basereg), (disp)); \
+ } else { \
+ *(inst)++ = (unsigned char)0xdb; \
+ x86_membase_emit ((inst), 0, (basereg), (disp)); \
+ } \
+ } while (0)
+
+#define x86_fld_reg(inst,index) \
+ do { \
+ x86_codegen_pre(&(inst), 2); \
+ *(inst)++ = (unsigned char)0xd9; \
+ *(inst)++ = (unsigned char)0xc0 + ((index) & 0x07); \
+ } while (0)
+
+#define x86_fldz(inst) \
+ do { \
+ x86_codegen_pre(&(inst), 2); \
+ *(inst)++ = (unsigned char)0xd9; \
+ *(inst)++ = (unsigned char)0xee; \
+ } while (0)
+
+#define x86_fld1(inst) \
+ do { \
+ x86_codegen_pre(&(inst), 2); \
+ *(inst)++ = (unsigned char)0xd9; \
+ *(inst)++ = (unsigned char)0xe8; \
+ } while (0)
+
+#define x86_fldpi(inst) \
+ do { \
+ x86_codegen_pre(&(inst), 2); \
+ *(inst)++ = (unsigned char)0xd9; \
+ *(inst)++ = (unsigned char)0xeb; \
+ } while (0)
+
+#define x86_fst(inst,mem,is_double,pop_stack) \
+ do { \
+ x86_codegen_pre(&(inst), 6); \
+ *(inst)++ = (is_double) ? (unsigned char)0xdd: (unsigned char)0xd9; \
+ x86_mem_emit ((inst), 2 + ((pop_stack) ? 1 : 0), (mem)); \
+ } while (0)
+
+#define x86_fst_membase(inst,basereg,disp,is_double,pop_stack) \
+ do { \
+ x86_codegen_pre(&(inst), 1 + kMaxMembaseEmitPadding); \
+ *(inst)++ = (is_double) ? (unsigned char)0xdd: (unsigned char)0xd9; \
+ x86_membase_emit ((inst), 2 + ((pop_stack) ? 1 : 0), (basereg), (disp)); \
+ } while (0)
+
+#define x86_fst80_mem(inst,mem) \
+ do { \
+ x86_codegen_pre(&(inst), 6); \
+ *(inst)++ = (unsigned char)0xdb; \
+ x86_mem_emit ((inst), 7, (mem)); \
+ } while (0)
+
+
+#define x86_fst80_membase(inst,basereg,disp) \
+ do { \
+ x86_codegen_pre(&(inst), 1 + kMaxMembaseEmitPadding); \
+ *(inst)++ = (unsigned char)0xdb; \
+ x86_membase_emit ((inst), 7, (basereg), (disp)); \
+ } while (0)
+
+
+#define x86_fist_pop(inst,mem,is_long) \
+ do { \
+ x86_codegen_pre(&(inst), 6); \
+ if ((is_long)) { \
+ *(inst)++ = (unsigned char)0xdf; \
+ x86_mem_emit ((inst), 7, (mem)); \
+ } else { \
+ *(inst)++ = (unsigned char)0xdb; \
+ x86_mem_emit ((inst), 3, (mem)); \
+ } \
+ } while (0)
+
+#define x86_fist_pop_membase(inst,basereg,disp,is_long) \
+ do { \
+ x86_codegen_pre(&(inst), 1 + kMaxMembaseEmitPadding); \
+ if ((is_long)) { \
+ *(inst)++ = (unsigned char)0xdf; \
+ x86_membase_emit ((inst), 7, (basereg), (disp)); \
+ } else { \
+ *(inst)++ = (unsigned char)0xdb; \
+ x86_membase_emit ((inst), 3, (basereg), (disp)); \
+ } \
+ } while (0)
+
+#define x86_fstsw(inst) \
+ do { \
+ x86_codegen_pre(&(inst), 3); \
+ *(inst)++ = (unsigned char)0x9b; \
+ *(inst)++ = (unsigned char)0xdf; \
+ *(inst)++ = (unsigned char)0xe0; \
+ } while (0)
+
+/**
+ * @x86_fist_membase
+ * Converts the content of ST(0) to an integer and stores it at the memory
+ * location addressed by [basereg + disp].
+ * is_int specifies whether the destination is int32 (TRUE) or int16 (FALSE).
+ */
+#define x86_fist_membase(inst,basereg,disp,is_int) \
+ do { \
+ x86_codegen_pre(&(inst), 1 + kMaxMembaseEmitPadding); \
+ if ((is_int)) { \
+ *(inst)++ = (unsigned char)0xdb; \
+ x86_membase_emit ((inst), 2, (basereg), (disp)); \
+ } else { \
+ *(inst)++ = (unsigned char)0xdf; \
+ x86_membase_emit ((inst), 2, (basereg), (disp)); \
+ } \
+ } while (0)
+
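+/*
+ * Usage sketch: x86_fist_membase ((ip), X86_ESP, 4, 1) emits
+ * db 54 24 04, "fist dword ptr [esp+4]" (ST(0) is left on the stack;
+ * use x86_fist_pop_membase to pop it as well).
+ */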
+
+#define x86_push_reg(inst,reg) \
+ do { \
+ *(inst)++ = (unsigned char)0x50 + (reg); \
+ } while (0)
+
+#define x86_push_regp(inst,reg) \
+ do { \
+ x86_codegen_pre(&(inst), 2); \
+ *(inst)++ = (unsigned char)0xff; \
+ x86_regp_emit ((inst), 6, (reg)); \
+ } while (0)
+
+#define x86_push_mem(inst,mem) \
+ do { \
+ x86_codegen_pre(&(inst), 6); \
+ *(inst)++ = (unsigned char)0xff; \
+ x86_mem_emit ((inst), 6, (mem)); \
+ } while (0)
+
+#define x86_push_membase(inst,basereg,disp) \
+ do { \
+ x86_codegen_pre(&(inst), 1 + kMaxMembaseEmitPadding); \
+ *(inst)++ = (unsigned char)0xff; \
+ x86_membase_emit ((inst), 6, (basereg), (disp)); \
+ } while (0)
+
+#define x86_push_memindex(inst,basereg,disp,indexreg,shift) \
+ do { \
+ x86_codegen_pre(&(inst), 1 + kMaxMemindexEmitPadding); \
+ *(inst)++ = (unsigned char)0xff; \
+ x86_memindex_emit ((inst), 6, (basereg), (disp), (indexreg), (shift)); \
+ } while (0)
+
+#define x86_push_imm_template(inst) x86_push_imm (inst, 0xf0f0f0f0)
+
+#define x86_push_imm(inst,imm) \
+ do { \
+ int _imm = (int) (imm); \
+ if (x86_is_imm8 (_imm)) { \
+ x86_codegen_pre(&(inst), 2); \
+ *(inst)++ = (unsigned char)0x6A; \
+ x86_imm_emit8 ((inst), (_imm)); \
+ } else { \
+ x86_codegen_pre(&(inst), 5); \
+ *(inst)++ = (unsigned char)0x68; \
+ x86_imm_emit32 ((inst), (_imm)); \
+ } \
+ } while (0)
+
+#define x86_pop_reg(inst,reg) \
+ do { \
+ *(inst)++ = (unsigned char)0x58 + (reg); \
+ } while (0)
+
+#define x86_pop_mem(inst,mem) \
+ do { \
+ x86_codegen_pre(&(inst), 6); \
+		*(inst)++ = (unsigned char)0x8f;	\
+ x86_mem_emit ((inst), 0, (mem)); \
+ } while (0)
+
+#define x86_pop_membase(inst,basereg,disp) \
+ do { \
+ x86_codegen_pre(&(inst), 1 + kMaxMembaseEmitPadding); \
+		*(inst)++ = (unsigned char)0x8f;	\
+ x86_membase_emit ((inst), 0, (basereg), (disp)); \
+ } while (0)
+
+#define x86_pushad(inst) do { *(inst)++ = (unsigned char)0x60; } while (0)
+#define x86_pushfd(inst) do { *(inst)++ = (unsigned char)0x9c; } while (0)
+#define x86_popad(inst) do { *(inst)++ = (unsigned char)0x61; } while (0)
+#define x86_popfd(inst) do { *(inst)++ = (unsigned char)0x9d; } while (0)
+
+#define x86_loop(inst,imm) \
+ do { \
+ x86_codegen_pre(&(inst), 2); \
+ *(inst)++ = (unsigned char)0xe2; \
+ x86_imm_emit8 ((inst), (imm)); \
+ } while (0)
+
+#define x86_loope(inst,imm) \
+ do { \
+ x86_codegen_pre(&(inst), 2); \
+ *(inst)++ = (unsigned char)0xe1; \
+ x86_imm_emit8 ((inst), (imm)); \
+ } while (0)
+
+#define x86_loopne(inst,imm) \
+ do { \
+ x86_codegen_pre(&(inst), 2); \
+ *(inst)++ = (unsigned char)0xe0; \
+ x86_imm_emit8 ((inst), (imm)); \
+ } while (0)
+
+#if defined(TARGET_X86)
+#define x86_jump32(inst,imm) \
+ do { \
+ x86_codegen_pre(&(inst), 5); \
+ *(inst)++ = (unsigned char)0xe9; \
+ x86_imm_emit32 ((inst), (imm)); \
+ } while (0)
+
+#define x86_jump8(inst,imm) \
+ do { \
+ x86_codegen_pre(&(inst), 2); \
+ *(inst)++ = (unsigned char)0xeb; \
+ x86_imm_emit8 ((inst), (imm)); \
+ } while (0)
+#elif defined(TARGET_AMD64)
+/* These macros are used directly from mini-amd64.c and other */
+/* x64 specific files, so they need to be instrumented directly. */
+#define x86_jump32(inst,imm) \
+ do { \
+ x64_codegen_pre(inst); \
+ *(inst)++ = (unsigned char)0xe9; \
+ x86_imm_emit32 ((inst), (imm)); \
+ x64_codegen_post(inst); \
+ } while (0)
+
+#define x86_jump8(inst,imm) \
+ do { \
+ x64_codegen_pre(inst); \
+ *(inst)++ = (unsigned char)0xeb; \
+ x86_imm_emit8 ((inst), (imm)); \
+ x64_codegen_post(inst); \
+ } while (0)
+#endif
+
+#if defined( __native_client_codegen__ ) && defined( TARGET_X86 )
+#define x86_jump_reg(inst,reg) do { \
+ x86_codegen_pre(&(inst), 5); \
+ *(inst)++ = (unsigned char)0x83; /* and */ \
+ x86_reg_emit ((inst), 4, (reg)); /* reg */ \
+ *(inst)++ = (unsigned char)nacl_align_byte; \
+ *(inst)++ = (unsigned char)0xff; \
+ x86_reg_emit ((inst), 4, (reg)); \
+ } while (0)
+
+/* Let's hope ECX is available for these... */
+#define x86_jump_mem(inst,mem) do { \
+ x86_mov_reg_mem(inst, (X86_ECX), (mem), 4); \
+ x86_jump_reg(inst, (X86_ECX)); \
+ } while (0)
+
+#define x86_jump_membase(inst,basereg,disp) do { \
+ x86_mov_reg_membase(inst, (X86_ECX), basereg, disp, 4); \
+ x86_jump_reg(inst, (X86_ECX)); \
+ } while (0)
+
+/* like x86_jump_membase, but force a 32-bit displacement */
+#define x86_jump_membase32(inst,basereg,disp) do { \
+ x86_codegen_pre(&(inst), 6); \
+ *(inst)++ = (unsigned char)0x8b; \
+ x86_address_byte ((inst), 2, X86_ECX, (basereg)); \
+ x86_imm_emit32 ((inst), (disp)); \
+ x86_jump_reg(inst, (X86_ECX)); \
+ } while (0)
+#else /* __native_client_codegen__ */
+#define x86_jump_reg(inst,reg) \
+ do { \
+ *(inst)++ = (unsigned char)0xff; \
+ x86_reg_emit ((inst), 4, (reg)); \
+ } while (0)
+
+#define x86_jump_mem(inst,mem) \
+ do { \
+ *(inst)++ = (unsigned char)0xff; \
+ x86_mem_emit ((inst), 4, (mem)); \
+ } while (0)
+
+#define x86_jump_membase(inst,basereg,disp) \
+ do { \
+ *(inst)++ = (unsigned char)0xff; \
+ x86_membase_emit ((inst), 4, (basereg), (disp)); \
+ } while (0)
+#endif /* __native_client_codegen__ */
+/*
+ * target is a pointer in our buffer.
+ */
+#define x86_jump_code_body(inst,target) \
+ do { \
+ int t; \
+ x86_codegen_pre(&(inst), 2); \
+ t = (unsigned char*)(target) - (inst) - 2; \
+ if (x86_is_imm8(t)) { \
+ x86_jump8 ((inst), t); \
+ } else { \
+ x86_codegen_pre(&(inst), 5); \
+ t = (unsigned char*)(target) - (inst) - 5; \
+ x86_jump32 ((inst), t); \
+ } \
+ } while (0)
+
+#if defined(__native_client_codegen__)
+#if defined(TARGET_X86)
+#define x86_jump_code(inst,target) \
+ do { \
+ guint8* jump_start = (inst); \
+ x86_jump_code_body((inst),(target)); \
+ x86_patch(jump_start, (target)); \
+ } while (0)
+#elif defined(TARGET_AMD64)
+#define x86_jump_code(inst,target) \
+ do { \
+ /* jump_code_body is used twice because there are offsets */ \
+ /* calculated based on the IP, which can change after the */ \
+ /* call to x64_codegen_post */ \
+ x64_codegen_pre(inst); \
+ x86_jump_code_body((inst),(target)); \
+ inst = x64_codegen_post(inst); \
+ x86_jump_code_body((inst),(target)); \
+ } while (0)
+#endif
+#else
+#define x86_jump_code(inst,target) \
+ do { \
+ x86_jump_code_body((inst),(target)); \
+ } while (0)
+#endif /* __native_client_codegen__ */
+
+#define x86_jump_disp(inst,disp) \
+ do { \
+ int t = (disp) - 2; \
+ if (x86_is_imm8(t)) { \
+ x86_jump8 ((inst), t); \
+ } else { \
+ t -= 3; \
+ x86_jump32 ((inst), t); \
+ } \
+ } while (0)
+
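+/*
+ * Note on the displacement math above: rel8/rel32 jump operands are
+ * relative to the end of the jump instruction, so the 2-byte short form
+ * (eb ib) subtracts 2 and the 5-byte near form (e9 id) subtracts 5 --
+ * hence the "- 2" followed by the recomputation against "- 5".  Usage
+ * sketch (plain, non-NaCl build): with the target 16 bytes behind the
+ * write pointer, x86_jump_code ((ip), (ip) - 16) emits eb ee (-18 == 0xee).
+ */
+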
+#if defined(TARGET_X86)
+#define x86_branch8(inst,cond,imm,is_signed) \
+ do { \
+ x86_codegen_pre(&(inst), 2); \
+ if ((is_signed)) \
+ *(inst)++ = x86_cc_signed_map [(cond)]; \
+ else \
+ *(inst)++ = x86_cc_unsigned_map [(cond)]; \
+ x86_imm_emit8 ((inst), (imm)); \
+ } while (0)
+
+#define x86_branch32(inst,cond,imm,is_signed) \
+ do { \
+ x86_codegen_pre(&(inst), 6); \
+ *(inst)++ = (unsigned char)0x0f; \
+ if ((is_signed)) \
+ *(inst)++ = x86_cc_signed_map [(cond)] + 0x10; \
+ else \
+ *(inst)++ = x86_cc_unsigned_map [(cond)] + 0x10; \
+ x86_imm_emit32 ((inst), (imm)); \
+ } while (0)
+#elif defined(TARGET_AMD64)
+/* These macros are used directly from mini-amd64.c and other */
+/* x64 specific files, so they need to be instrumented directly. */
+#define x86_branch8(inst,cond,imm,is_signed) \
+ do { \
+ x64_codegen_pre(inst); \
+ if ((is_signed)) \
+ *(inst)++ = x86_cc_signed_map [(cond)]; \
+ else \
+ *(inst)++ = x86_cc_unsigned_map [(cond)]; \
+ x86_imm_emit8 ((inst), (imm)); \
+ x64_codegen_post(inst); \
+ } while (0)
+#define x86_branch32(inst,cond,imm,is_signed) \
+ do { \
+ x64_codegen_pre(inst); \
+ *(inst)++ = (unsigned char)0x0f; \
+ if ((is_signed)) \
+ *(inst)++ = x86_cc_signed_map [(cond)] + 0x10; \
+ else \
+ *(inst)++ = x86_cc_unsigned_map [(cond)] + 0x10; \
+ x86_imm_emit32 ((inst), (imm)); \
+ x64_codegen_post(inst); \
+ } while (0)
+#endif
+
+#if defined(TARGET_X86)
+#define x86_branch(inst,cond,target,is_signed) \
+ do { \
+ int offset; \
+ guint8* branch_start; \
+ x86_codegen_pre(&(inst), 2); \
+ offset = (target) - (inst) - 2; \
+ branch_start = (inst); \
+ if (x86_is_imm8 ((offset))) \
+ x86_branch8 ((inst), (cond), offset, (is_signed)); \
+ else { \
+ x86_codegen_pre(&(inst), 6); \
+ offset = (target) - (inst) - 6; \
+ x86_branch32 ((inst), (cond), offset, (is_signed)); \
+ } \
+ x86_patch(branch_start, (target)); \
+ } while (0)
+#elif defined(TARGET_AMD64)
+/* This macro is used directly from mini-amd64.c and other */
+/* x64 specific files, so it needs to be instrumented directly. */
+
+#define x86_branch_body(inst,cond,target,is_signed) \
+ do { \
+ int offset = (target) - (inst) - 2; \
+ if (x86_is_imm8 ((offset))) \
+ x86_branch8 ((inst), (cond), offset, (is_signed)); \
+ else { \
+ offset = (target) - (inst) - 6; \
+ x86_branch32 ((inst), (cond), offset, (is_signed)); \
+ } \
+ } while (0)
+
+#if defined(__native_client_codegen__)
+#define x86_branch(inst,cond,target,is_signed) \
+ do { \
+ /* branch_body is used twice because there are offsets */ \
+ /* calculated based on the IP, which can change after */ \
+ /* the call to x64_codegen_post */ \
+ x64_codegen_pre(inst); \
+ x86_branch_body((inst),(cond),(target),(is_signed)); \
+ inst = x64_codegen_post(inst); \
+ x86_branch_body((inst),(cond),(target),(is_signed)); \
+ } while (0)
+#else
+#define x86_branch(inst,cond,target,is_signed) \
+ do { \
+ x86_branch_body((inst),(cond),(target),(is_signed)); \
+ } while (0)
+#endif /* __native_client_codegen__ */
+
+#endif /* TARGET_AMD64 */
+
+#define x86_branch_disp(inst,cond,disp,is_signed) \
+ do { \
+ int offset = (disp) - 2; \
+ if (x86_is_imm8 ((offset))) \
+ x86_branch8 ((inst), (cond), offset, (is_signed)); \
+ else { \
+ offset -= 4; \
+ x86_branch32 ((inst), (cond), offset, (is_signed)); \
+ } \
+ } while (0)
+
+#define x86_set_reg(inst,cond,reg,is_signed) \
+ do { \
+ g_assert (X86_IS_BYTE_REG (reg)); \
+ x86_codegen_pre(&(inst), 3); \
+ *(inst)++ = (unsigned char)0x0f; \
+ if ((is_signed)) \
+ *(inst)++ = x86_cc_signed_map [(cond)] + 0x20; \
+ else \
+ *(inst)++ = x86_cc_unsigned_map [(cond)] + 0x20; \
+ x86_reg_emit ((inst), 0, (reg)); \
+ } while (0)
+
+#define x86_set_mem(inst,cond,mem,is_signed) \
+ do { \
+ x86_codegen_pre(&(inst), 7); \
+ *(inst)++ = (unsigned char)0x0f; \
+ if ((is_signed)) \
+ *(inst)++ = x86_cc_signed_map [(cond)] + 0x20; \
+ else \
+ *(inst)++ = x86_cc_unsigned_map [(cond)] + 0x20; \
+ x86_mem_emit ((inst), 0, (mem)); \
+ } while (0)
+
+#define x86_set_membase(inst,cond,basereg,disp,is_signed) \
+ do { \
+ x86_codegen_pre(&(inst), 2 + kMaxMembaseEmitPadding); \
+ *(inst)++ = (unsigned char)0x0f; \
+ if ((is_signed)) \
+ *(inst)++ = x86_cc_signed_map [(cond)] + 0x20; \
+ else \
+ *(inst)++ = x86_cc_unsigned_map [(cond)] + 0x20; \
+ x86_membase_emit ((inst), 0, (basereg), (disp)); \
+ } while (0)
+
+#define x86_call_imm_body(inst,disp) \
+ do { \
+ *(inst)++ = (unsigned char)0xe8; \
+ x86_imm_emit32 ((inst), (int)(disp)); \
+ } while (0)
+
+#define x86_call_imm(inst,disp) \
+ do { \
+ x86_call_sequence_pre((inst)); \
+ x86_call_imm_body((inst), (disp)); \
+ x86_call_sequence_post((inst)); \
+ } while (0)
+
+
+#if defined( __native_client_codegen__ ) && defined( TARGET_X86 )
+#define x86_call_reg_internal(inst,reg) \
+ do { \
+ *(inst)++ = (unsigned char)0x83; /* and */ \
+ x86_reg_emit ((inst), 4, (reg)); /* reg */ \
+ *(inst)++ = (unsigned char)nacl_align_byte; \
+ *(inst)++ = (unsigned char)0xff; /* call */ \
+ x86_reg_emit ((inst), 2, (reg)); /* reg */ \
+ } while (0)
+
+#define x86_call_reg(inst, reg) do { \
+ x86_call_sequence_pre((inst)); \
+ x86_call_reg_internal(inst, reg); \
+ x86_call_sequence_post((inst)); \
+ } while (0)
+
+
+/* It appears that x86_call_mem() is never used, so I'm leaving it out. */
+#define x86_call_membase(inst,basereg,disp) do { \
+ x86_call_sequence_pre((inst)); \
+		/* x86_mov_reg_membase() inlined so it's a fixed size */	\
+ *(inst)++ = (unsigned char)0x8b; \
+ x86_address_byte ((inst), 2, (X86_ECX), (basereg)); \
+ x86_imm_emit32 ((inst), (disp)); \
+ x86_call_reg_internal(inst, X86_ECX); \
+ x86_call_sequence_post((inst)); \
+ } while (0)
+#else /* __native_client_codegen__ */
+#define x86_call_reg(inst,reg) \
+ do { \
+ *(inst)++ = (unsigned char)0xff; \
+ x86_reg_emit ((inst), 2, (reg)); \
+ } while (0)
+
+#define x86_call_mem(inst,mem) \
+ do { \
+ *(inst)++ = (unsigned char)0xff; \
+ x86_mem_emit ((inst), 2, (mem)); \
+ } while (0)
+
+#define x86_call_membase(inst,basereg,disp) \
+ do { \
+ *(inst)++ = (unsigned char)0xff; \
+ x86_membase_emit ((inst), 2, (basereg), (disp)); \
+ } while (0)
+#endif /* __native_client_codegen__ */
+
+
+#if defined( __native_client_codegen__ ) && defined( TARGET_X86 )
+
+#define x86_call_code(inst,target) \
+ do { \
+ int _x86_offset; \
+ guint8* call_start; \
+ guint8* _aligned_start; \
+ x86_call_sequence_pre_val((inst)); \
+ _x86_offset = (unsigned char*)(target) - (inst); \
+ _x86_offset -= 5; \
+ x86_call_imm_body ((inst), _x86_offset); \
+ _aligned_start = x86_call_sequence_post_val((inst)); \
+ call_start = _aligned_start; \
+ _x86_offset = (unsigned char*)(target) - (_aligned_start); \
+ _x86_offset -= 5; \
+ x86_call_imm_body ((_aligned_start), _x86_offset); \
+ x86_patch(call_start, (target)); \
+ } while (0)
+
+#define SIZE_OF_RET 6
+#define x86_ret(inst) do { \
+ *(inst)++ = (unsigned char)0x59; /* pop ecx */ \
+ x86_codegen_pre(&(inst), 5); \
+		*(inst)++ = (unsigned char)0x83;  /* and $nacl_align_byte, ecx */ \
+ *(inst)++ = (unsigned char)0xe1; \
+ *(inst)++ = (unsigned char)nacl_align_byte; \
+ *(inst)++ = (unsigned char)0xff; /* jmp ecx */ \
+ *(inst)++ = (unsigned char)0xe1; \
+ } while (0)
+
+/* pop return address */
+/* pop imm bytes from stack */
+/* return */
+#define x86_ret_imm(inst,imm) do { \
+ *(inst)++ = (unsigned char)0x59; /* pop ecx */ \
+ x86_alu_reg_imm ((inst), X86_ADD, X86_ESP, imm); \
+ x86_codegen_pre(&(inst), 5); \
+		*(inst)++ = (unsigned char)0x83;  /* and $nacl_align_byte, ecx */ \
+ *(inst)++ = (unsigned char)0xe1; \
+ *(inst)++ = (unsigned char)nacl_align_byte; \
+ *(inst)++ = (unsigned char)0xff; /* jmp ecx */ \
+ *(inst)++ = (unsigned char)0xe1; \
+} while (0)
+#else /* __native_client_codegen__ */
+
+#define x86_call_code(inst,target) \
+ do { \
+ int _x86_offset; \
+ _x86_offset = (unsigned char*)(target) - (inst); \
+ _x86_offset -= 5; \
+ x86_call_imm_body ((inst), _x86_offset); \
+ } while (0)
+
+#define x86_ret(inst) do { *(inst)++ = (unsigned char)0xc3; } while (0)
+
+#define x86_ret_imm(inst,imm) \
+ do { \
+ if ((imm) == 0) { \
+ x86_ret ((inst)); \
+ } else { \
+ x86_codegen_pre(&(inst), 3); \
+ *(inst)++ = (unsigned char)0xc2; \
+ x86_imm_emit16 ((inst), (imm)); \
+ } \
+ } while (0)
+#endif /* __native_client_codegen__ */
+
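+/*
+ * Note: calls use the same end-of-instruction-relative convention;
+ * x86_call_code emits e8 followed by rel32 == target - (ip + 5).  Usage
+ * sketch, where `helper` points at previously generated code:
+ *
+ *   x86_call_code ((ip), helper);
+ */
+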
+#define x86_cmov_reg(inst,cond,is_signed,dreg,reg) \
+ do { \
+ x86_codegen_pre(&(inst), 3); \
+ *(inst)++ = (unsigned char) 0x0f; \
+ if ((is_signed)) \
+ *(inst)++ = x86_cc_signed_map [(cond)] - 0x30; \
+ else \
+ *(inst)++ = x86_cc_unsigned_map [(cond)] - 0x30; \
+ x86_reg_emit ((inst), (dreg), (reg)); \
+ } while (0)
+
+#define x86_cmov_mem(inst,cond,is_signed,reg,mem) \
+ do { \
+ x86_codegen_pre(&(inst), 7); \
+ *(inst)++ = (unsigned char) 0x0f; \
+ if ((is_signed)) \
+ *(inst)++ = x86_cc_signed_map [(cond)] - 0x30; \
+ else \
+ *(inst)++ = x86_cc_unsigned_map [(cond)] - 0x30; \
+ x86_mem_emit ((inst), (reg), (mem)); \
+ } while (0)
+
+#define x86_cmov_membase(inst,cond,is_signed,reg,basereg,disp) \
+ do { \
+ x86_codegen_pre(&(inst), 2 + kMaxMembaseEmitPadding); \
+ *(inst)++ = (unsigned char) 0x0f; \
+ if ((is_signed)) \
+ *(inst)++ = x86_cc_signed_map [(cond)] - 0x30; \
+ else \
+ *(inst)++ = x86_cc_unsigned_map [(cond)] - 0x30; \
+ x86_membase_emit ((inst), (reg), (basereg), (disp)); \
+ } while (0)
+
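+/*
+ * Note: x86_cc_signed_map/x86_cc_unsigned_map hold the short Jcc opcodes
+ * (0x70..0x7f), so the two-byte 0x0f forms are derived by offset:
+ * +0x10 gives Jcc rel32 (0x80..), +0x20 gives SETcc (0x90..) and
+ * -0x30 gives CMOVcc (0x40..), as used above.
+ */
+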
+#define x86_enter(inst,framesize) \
+ do { \
+ x86_codegen_pre(&(inst), 4); \
+ *(inst)++ = (unsigned char)0xc8; \
+ x86_imm_emit16 ((inst), (framesize)); \
+ *(inst)++ = 0; \
+ } while (0)
+
+#define x86_leave(inst) do { *(inst)++ = (unsigned char)0xc9; } while (0)
+#define x86_sahf(inst) do { *(inst)++ = (unsigned char)0x9e; } while (0)
+
+#define x86_fsin(inst) do { x86_codegen_pre(&(inst), 2); *(inst)++ = (unsigned char)0xd9; *(inst)++ = (unsigned char)0xfe; } while (0)
+#define x86_fcos(inst) do { x86_codegen_pre(&(inst), 2); *(inst)++ = (unsigned char)0xd9; *(inst)++ = (unsigned char)0xff; } while (0)
+#define x86_fabs(inst) do { x86_codegen_pre(&(inst), 2); *(inst)++ = (unsigned char)0xd9; *(inst)++ = (unsigned char)0xe1; } while (0)
+#define x86_ftst(inst) do { x86_codegen_pre(&(inst), 2); *(inst)++ = (unsigned char)0xd9; *(inst)++ = (unsigned char)0xe4; } while (0)
+#define x86_fxam(inst) do { x86_codegen_pre(&(inst), 2); *(inst)++ = (unsigned char)0xd9; *(inst)++ = (unsigned char)0xe5; } while (0)
+#define x86_fpatan(inst) do { x86_codegen_pre(&(inst), 2); *(inst)++ = (unsigned char)0xd9; *(inst)++ = (unsigned char)0xf3; } while (0)
+#define x86_fprem(inst) do { x86_codegen_pre(&(inst), 2); *(inst)++ = (unsigned char)0xd9; *(inst)++ = (unsigned char)0xf8; } while (0)
+#define x86_fprem1(inst) do { x86_codegen_pre(&(inst), 2); *(inst)++ = (unsigned char)0xd9; *(inst)++ = (unsigned char)0xf5; } while (0)
+#define x86_frndint(inst) do { x86_codegen_pre(&(inst), 2); *(inst)++ = (unsigned char)0xd9; *(inst)++ = (unsigned char)0xfc; } while (0)
+#define x86_fsqrt(inst) do { x86_codegen_pre(&(inst), 2); *(inst)++ = (unsigned char)0xd9; *(inst)++ = (unsigned char)0xfa; } while (0)
+#define x86_fptan(inst) do { x86_codegen_pre(&(inst), 2); *(inst)++ = (unsigned char)0xd9; *(inst)++ = (unsigned char)0xf2; } while (0)
+
+#define x86_padding(inst,size) \
+ do { \
+ switch ((size)) { \
+ case 1: x86_nop ((inst)); break; \
+ case 2: *(inst)++ = 0x8b; \
+ *(inst)++ = 0xc0; break; \
+ case 3: *(inst)++ = 0x8d; *(inst)++ = 0x6d; \
+ *(inst)++ = 0x00; break; \
+ case 4: *(inst)++ = 0x8d; *(inst)++ = 0x64; \
+ *(inst)++ = 0x24; *(inst)++ = 0x00; \
+ break; \
+ case 5: *(inst)++ = 0x8d; *(inst)++ = 0x64; \
+ *(inst)++ = 0x24; *(inst)++ = 0x00; \
+ x86_nop ((inst)); break; \
+ case 6: *(inst)++ = 0x8d; *(inst)++ = 0xad; \
+ *(inst)++ = 0x00; *(inst)++ = 0x00; \
+ *(inst)++ = 0x00; *(inst)++ = 0x00; \
+ break; \
+ case 7: *(inst)++ = 0x8d; *(inst)++ = 0xa4; \
+ *(inst)++ = 0x24; *(inst)++ = 0x00; \
+ *(inst)++ = 0x00; *(inst)++ = 0x00; \
+ *(inst)++ = 0x00; break; \
+ default: assert (0); \
+ } \
+ } while (0)
+
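+/*
+ * Note: the padding sequences above are side-effect-free filler of the
+ * requested length: "mov eax, eax" (2 bytes), "lea ebp, [ebp+0]" (3 or,
+ * with a 32-bit displacement, 6) and "lea esp, [esp+0]" (4 or 7, with a
+ * SIB byte), optionally followed by a one-byte nop.
+ */
+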
+#ifdef __native_client_codegen__
+
+#define kx86NaClLengthOfCallReg 5
+#define kx86NaClLengthOfCallImm 5
+#define kx86NaClLengthOfCallMembase (kx86NaClLengthOfCallReg + 6)
+
+#endif /* __native_client_codegen__ */
+
+#define x86_prolog(inst,frame_size,reg_mask) \
+ do { \
+ unsigned i, m = 1; \
+ x86_enter ((inst), (frame_size)); \
+ for (i = 0; i < X86_NREG; ++i, m <<= 1) { \
+ if ((reg_mask) & m) \
+ x86_push_reg ((inst), i); \
+ } \
+ } while (0)
+
+#define x86_epilog(inst,reg_mask) \
+ do { \
+ unsigned i, m = 1 << X86_EDI; \
+ for (i = X86_EDI; m != 0; i--, m=m>>1) { \
+ if ((reg_mask) & m) \
+ x86_pop_reg ((inst), i); \
+ } \
+ x86_leave ((inst)); \
+ x86_ret ((inst)); \
+ } while (0)
+
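+/*
+ * Usage sketch (plain build, cdecl): JIT a function that returns 42 into
+ * an executable buffer `code` (e.g. mmap'd with PROT_EXEC):
+ *
+ *   unsigned char *ip = code;
+ *   x86_prolog (ip, 0, 0);               (enter 0,0; no callee saves)
+ *   x86_mov_reg_imm (ip, X86_EAX, 42);   (return value in eax)
+ *   x86_epilog (ip, 0);                  (pop nothing; leave; ret)
+ *   assert (((int (*)(void)) code) () == 42);
+ */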
+
+typedef enum {
+ X86_SSE_SQRT = 0x51,
+ X86_SSE_RSQRT = 0x52,
+ X86_SSE_RCP = 0x53,
+ X86_SSE_ADD = 0x58,
+ X86_SSE_DIV = 0x5E,
+ X86_SSE_MUL = 0x59,
+ X86_SSE_SUB = 0x5C,
+ X86_SSE_MIN = 0x5D,
+ X86_SSE_MAX = 0x5F,
+ X86_SSE_COMP = 0xC2,
+ X86_SSE_AND = 0x54,
+ X86_SSE_ANDN = 0x55,
+ X86_SSE_OR = 0x56,
+ X86_SSE_XOR = 0x57,
+ X86_SSE_UNPCKL = 0x14,
+ X86_SSE_UNPCKH = 0x15,
+
+ X86_SSE_ADDSUB = 0xD0,
+ X86_SSE_HADD = 0x7C,
+ X86_SSE_HSUB = 0x7D,
+ X86_SSE_MOVSHDUP = 0x16,
+ X86_SSE_MOVSLDUP = 0x12,
+ X86_SSE_MOVDDUP = 0x12,
+
+ X86_SSE_PAND = 0xDB,
+ X86_SSE_POR = 0xEB,
+ X86_SSE_PXOR = 0xEF,
+
+ X86_SSE_PADDB = 0xFC,
+ X86_SSE_PADDW = 0xFD,
+ X86_SSE_PADDD = 0xFE,
+ X86_SSE_PADDQ = 0xD4,
+
+ X86_SSE_PSUBB = 0xF8,
+ X86_SSE_PSUBW = 0xF9,
+ X86_SSE_PSUBD = 0xFA,
+ X86_SSE_PSUBQ = 0xFB,
+
+ X86_SSE_PMAXSB = 0x3C, /*sse41*/
+ X86_SSE_PMAXSW = 0xEE,
+ X86_SSE_PMAXSD = 0x3D, /*sse41*/
+
+ X86_SSE_PMAXUB = 0xDE,
+ X86_SSE_PMAXUW = 0x3E, /*sse41*/
+ X86_SSE_PMAXUD = 0x3F, /*sse41*/
+
+ X86_SSE_PMINSB = 0x38, /*sse41*/
+ X86_SSE_PMINSW = 0xEA,
+	X86_SSE_PMINSD = 0x39, /*sse41*/
+
+ X86_SSE_PMINUB = 0xDA,
+ X86_SSE_PMINUW = 0x3A, /*sse41*/
+ X86_SSE_PMINUD = 0x3B, /*sse41*/
+
+ X86_SSE_PAVGB = 0xE0,
+ X86_SSE_PAVGW = 0xE3,
+
+ X86_SSE_PCMPEQB = 0x74,
+ X86_SSE_PCMPEQW = 0x75,
+ X86_SSE_PCMPEQD = 0x76,
+ X86_SSE_PCMPEQQ = 0x29, /*sse41*/
+
+ X86_SSE_PCMPGTB = 0x64,
+ X86_SSE_PCMPGTW = 0x65,
+ X86_SSE_PCMPGTD = 0x66,
+ X86_SSE_PCMPGTQ = 0x37, /*sse42*/
+
+ X86_SSE_PSADBW = 0xf6,
+
+ X86_SSE_PSHUFD = 0x70,
+
+ X86_SSE_PUNPCKLBW = 0x60,
+ X86_SSE_PUNPCKLWD = 0x61,
+ X86_SSE_PUNPCKLDQ = 0x62,
+ X86_SSE_PUNPCKLQDQ = 0x6C,
+
+ X86_SSE_PUNPCKHBW = 0x68,
+ X86_SSE_PUNPCKHWD = 0x69,
+ X86_SSE_PUNPCKHDQ = 0x6A,
+ X86_SSE_PUNPCKHQDQ = 0x6D,
+
+ X86_SSE_PACKSSWB = 0x63,
+ X86_SSE_PACKSSDW = 0x6B,
+
+ X86_SSE_PACKUSWB = 0x67,
+	X86_SSE_PACKUSDW = 0x2B, /*sse41*/
+
+ X86_SSE_PADDUSB = 0xDC,
+ X86_SSE_PADDUSW = 0xDD,
+ X86_SSE_PSUBUSB = 0xD8,
+ X86_SSE_PSUBUSW = 0xD9,
+
+ X86_SSE_PADDSB = 0xEC,
+ X86_SSE_PADDSW = 0xED,
+ X86_SSE_PSUBSB = 0xE8,
+ X86_SSE_PSUBSW = 0xE9,
+
+ X86_SSE_PMULLW = 0xD5,
+	X86_SSE_PMULLD = 0x40, /*sse41*/
+ X86_SSE_PMULHUW = 0xE4,
+ X86_SSE_PMULHW = 0xE5,
+ X86_SSE_PMULUDQ = 0xF4,
+
+ X86_SSE_PMOVMSKB = 0xD7,
+
+ X86_SSE_PSHIFTW = 0x71,
+ X86_SSE_PSHIFTD = 0x72,
+ X86_SSE_PSHIFTQ = 0x73,
+ X86_SSE_SHR = 2,
+ X86_SSE_SAR = 4,
+ X86_SSE_SHL = 6,
+
+ X86_SSE_PSRLW_REG = 0xD1,
+ X86_SSE_PSRAW_REG = 0xE1,
+ X86_SSE_PSLLW_REG = 0xF1,
+
+ X86_SSE_PSRLD_REG = 0xD2,
+ X86_SSE_PSRAD_REG = 0xE2,
+ X86_SSE_PSLLD_REG = 0xF2,
+
+ X86_SSE_PSRLQ_REG = 0xD3,
+ X86_SSE_PSLLQ_REG = 0xF3,
+
+ X86_SSE_PREFETCH = 0x18,
+ X86_SSE_MOVNTPS = 0x2B,
+ X86_SSE_MOVHPD_REG_MEMBASE = 0x16,
+ X86_SSE_MOVHPD_MEMBASE_REG = 0x17,
+
+ X86_SSE_MOVSD_REG_MEMBASE = 0x10,
+ X86_SSE_MOVSD_MEMBASE_REG = 0x11,
+
+	X86_SSE_PINSRB = 0x20, /*sse41*/
+	X86_SSE_PINSRW = 0xC4,
+	X86_SSE_PINSRD = 0x22, /*sse41*/
+
+	X86_SSE_PEXTRB = 0x14, /*sse41*/
+	X86_SSE_PEXTRW = 0xC5,
+	X86_SSE_PEXTRD = 0x16, /*sse41*/
+
+ X86_SSE_SHUFP = 0xC6,
+
+ X86_SSE_CVTDQ2PD = 0xE6,
+ X86_SSE_CVTDQ2PS = 0x5B,
+ X86_SSE_CVTPD2DQ = 0xE6,
+ X86_SSE_CVTPD2PS = 0x5A,
+ X86_SSE_CVTPS2DQ = 0x5B,
+ X86_SSE_CVTPS2PD = 0x5A,
+ X86_SSE_CVTTPD2DQ = 0xE6,
+ X86_SSE_CVTTPS2DQ = 0x5B,
+} X86_SSE_Opcode;
+
+
+/* minimal SSE* support */
+#define x86_movsd_reg_membase(inst,dreg,basereg,disp) \
+ do { \
+ x86_codegen_pre(&(inst), 3 + kMaxMembaseEmitPadding); \
+ *(inst)++ = (unsigned char)0xf2; \
+ *(inst)++ = (unsigned char)0x0f; \
+ *(inst)++ = (unsigned char)0x10; \
+ x86_membase_emit ((inst), (dreg), (basereg), (disp)); \
+ } while (0)
+
+#define x86_cvttsd2si(inst,dreg,reg) \
+ do { \
+ x86_codegen_pre(&(inst), 4); \
+ *(inst)++ = (unsigned char)0xf2; \
+ *(inst)++ = (unsigned char)0x0f; \
+ *(inst)++ = (unsigned char)0x2c; \
+ x86_reg_emit ((inst), (dreg), (reg)); \
+ } while (0)
+
+#define x86_sse_alu_reg_reg(inst,opc,dreg,reg) \
+ do { \
+ x86_codegen_pre(&(inst), 3); \
+ *(inst)++ = (unsigned char)0x0F; \
+ *(inst)++ = (unsigned char)(opc); \
+ x86_reg_emit ((inst), (dreg), (reg)); \
+ } while (0)
+
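+/*
+ * Usage sketch: the opcode argument is an X86_SSE_Opcode value, so
+ * x86_sse_alu_reg_reg ((ip), X86_SSE_ADD, 0, 1) emits 0f 58 c1,
+ * "addps xmm0, xmm1"; the pd/sd/ss variants below prepend the
+ * 0x66/0xf2/0xf3 prefix to select the other data types.
+ */
+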
+#define x86_sse_alu_reg_membase(inst,opc,sreg,basereg,disp) \
+ do { \
+ x86_codegen_pre(&(inst), 2 + kMaxMembaseEmitPadding); \
+ *(inst)++ = (unsigned char)0x0f; \
+ *(inst)++ = (unsigned char)(opc); \
+ x86_membase_emit ((inst), (sreg), (basereg), (disp)); \
+ } while (0)
+
+#define x86_sse_alu_membase_reg(inst,opc,basereg,disp,reg) \
+ do { \
+ x86_codegen_pre(&(inst), 2 + kMaxMembaseEmitPadding); \
+ *(inst)++ = (unsigned char)0x0F; \
+ *(inst)++ = (unsigned char)(opc); \
+ x86_membase_emit ((inst), (reg), (basereg), (disp)); \
+ } while (0)
+
+#define x86_sse_alu_reg_reg_imm8(inst,opc,dreg,reg, imm8) \
+ do { \
+ x86_codegen_pre(&(inst), 4); \
+ *(inst)++ = (unsigned char)0x0F; \
+ *(inst)++ = (unsigned char)(opc); \
+ x86_reg_emit ((inst), (dreg), (reg)); \
+ *(inst)++ = (unsigned char)(imm8); \
+ } while (0)
+
+#define x86_sse_alu_pd_reg_reg_imm8(inst,opc,dreg,reg, imm8) \
+ do { \
+ x86_codegen_pre(&(inst), 5); \
+ *(inst)++ = (unsigned char)0x66; \
+ x86_sse_alu_reg_reg_imm8 ((inst), (opc), (dreg), (reg), (imm8)); \
+ } while (0)
+
+#define x86_sse_alu_pd_reg_reg(inst,opc,dreg,reg) \
+ do { \
+ x86_codegen_pre(&(inst), 4); \
+ *(inst)++ = (unsigned char)0x66; \
+ x86_sse_alu_reg_reg ((inst), (opc), (dreg), (reg)); \
+ } while (0)
+
+#define x86_sse_alu_pd_membase_reg(inst,opc,basereg,disp,reg) \
+ do { \
+ x86_codegen_pre(&(inst), 3 + kMaxMembaseEmitPadding); \
+ *(inst)++ = (unsigned char)0x66; \
+ x86_sse_alu_membase_reg ((inst), (opc), (basereg), (disp), (reg)); \
+ } while (0)
+
+#define x86_sse_alu_pd_reg_membase(inst,opc,dreg,basereg,disp) \
+ do { \
+ x86_codegen_pre(&(inst), 3 + kMaxMembaseEmitPadding); \
+ *(inst)++ = (unsigned char)0x66; \
+ x86_sse_alu_reg_membase ((inst), (opc), (dreg),(basereg), (disp)); \
+ } while (0)
+
+#define x86_sse_alu_pd_reg_reg_imm(inst,opc,dreg,reg,imm) \
+ do { \
+ x86_codegen_pre(&(inst), 5); \
+ x86_sse_alu_pd_reg_reg ((inst), (opc), (dreg), (reg)); \
+ *(inst)++ = (unsigned char)(imm); \
+ } while (0)
+
+#define x86_sse_alu_pd_reg_membase_imm(inst,opc,dreg,basereg,disp,imm) \
+ do { \
+ x86_codegen_pre(&(inst), 4 + kMaxMembaseEmitPadding); \
+ x86_sse_alu_pd_reg_membase ((inst), (opc), (dreg),(basereg), (disp)); \
+ *(inst)++ = (unsigned char)(imm); \
+ } while (0)
+
+
+#define x86_sse_alu_ps_reg_reg(inst,opc,dreg,reg) \
+ do { \
+ x86_sse_alu_reg_reg ((inst), (opc), (dreg), (reg)); \
+ } while (0)
+
+#define x86_sse_alu_ps_reg_reg_imm(inst,opc,dreg,reg, imm) \
+ do { \
+ x86_codegen_pre(&(inst), 4); \
+ x86_sse_alu_reg_reg ((inst), (opc), (dreg), (reg)); \
+ *(inst)++ = (unsigned char)imm; \
+ } while (0)
+
+
+#define x86_sse_alu_sd_reg_reg(inst,opc,dreg,reg) \
+ do { \
+ x86_codegen_pre(&(inst), 4); \
+ *(inst)++ = (unsigned char)0xF2; \
+ x86_sse_alu_reg_reg ((inst), (opc), (dreg), (reg)); \
+ } while (0)
+
+#define x86_sse_alu_sd_membase_reg(inst,opc,basereg,disp,reg) \
+ do { \
+ x86_codegen_pre(&(inst), 3 + kMaxMembaseEmitPadding); \
+ *(inst)++ = (unsigned char)0xF2; \
+ x86_sse_alu_membase_reg ((inst), (opc), (basereg), (disp), (reg)); \
+ } while (0)
+
+
+#define x86_sse_alu_ss_reg_reg(inst,opc,dreg,reg) \
+ do { \
+ x86_codegen_pre(&(inst), 4); \
+ *(inst)++ = (unsigned char)0xF3; \
+ x86_sse_alu_reg_reg ((inst), (opc), (dreg), (reg)); \
+ } while (0)
+
+#define x86_sse_alu_ss_membase_reg(inst,opc,basereg,disp,reg) \
+ do { \
+ x86_codegen_pre(&(inst), 3 + kMaxMembaseEmitPadding); \
+ *(inst)++ = (unsigned char)0xF3; \
+ x86_sse_alu_membase_reg ((inst), (opc), (basereg), (disp), (reg)); \
+ } while (0)
+
+
+
+#define x86_sse_alu_sse41_reg_reg(inst,opc,dreg,reg) \
+ do { \
+ x86_codegen_pre(&(inst), 5); \
+ *(inst)++ = (unsigned char)0x66; \
+ *(inst)++ = (unsigned char)0x0F; \
+ *(inst)++ = (unsigned char)0x38; \
+ *(inst)++ = (unsigned char)(opc); \
+ x86_reg_emit ((inst), (dreg), (reg)); \
+ } while (0)
+
+#define x86_movups_reg_membase(inst,sreg,basereg,disp) \
+ do { \
+ x86_codegen_pre(&(inst), 2 + kMaxMembaseEmitPadding); \
+ *(inst)++ = (unsigned char)0x0f; \
+ *(inst)++ = (unsigned char)0x10; \
+ x86_membase_emit ((inst), (sreg), (basereg), (disp)); \
+ } while (0)
+
+#define x86_movups_membase_reg(inst,basereg,disp,reg) \
+ do { \
+ x86_codegen_pre(&(inst), 2 + kMaxMembaseEmitPadding); \
+ *(inst)++ = (unsigned char)0x0f; \
+ *(inst)++ = (unsigned char)0x11; \
+ x86_membase_emit ((inst), (reg), (basereg), (disp)); \
+ } while (0)
+
+#define x86_movaps_reg_membase(inst,sreg,basereg,disp) \
+ do { \
+ x86_codegen_pre(&(inst), 2 + kMaxMembaseEmitPadding); \
+ *(inst)++ = (unsigned char)0x0f; \
+ *(inst)++ = (unsigned char)0x28; \
+ x86_membase_emit ((inst), (sreg), (basereg), (disp)); \
+ } while (0)
+
+#define x86_movaps_membase_reg(inst,basereg,disp,reg) \
+ do { \
+ x86_codegen_pre(&(inst), 2 + kMaxMembaseEmitPadding); \
+ *(inst)++ = (unsigned char)0x0f; \
+ *(inst)++ = (unsigned char)0x29; \
+ x86_membase_emit ((inst), (reg), (basereg), (disp)); \
+ } while (0)
+
+#define x86_movaps_reg_reg(inst,dreg,sreg) \
+ do { \
+ x86_codegen_pre(&(inst), 3); \
+ *(inst)++ = (unsigned char)0x0f; \
+ *(inst)++ = (unsigned char)0x28; \
+ x86_reg_emit ((inst), (dreg), (sreg)); \
+ } while (0)
+
+
+#define x86_movd_reg_xreg(inst,dreg,sreg) \
+ do { \
+ x86_codegen_pre(&(inst), 4); \
+ *(inst)++ = (unsigned char)0x66; \
+ *(inst)++ = (unsigned char)0x0f; \
+ *(inst)++ = (unsigned char)0x7e; \
+ x86_reg_emit ((inst), (sreg), (dreg)); \
+ } while (0)
+
+#define x86_movd_xreg_reg(inst,dreg,sreg) \
+ do { \
+ x86_codegen_pre(&(inst), 4); \
+ *(inst)++ = (unsigned char)0x66; \
+ *(inst)++ = (unsigned char)0x0f; \
+ *(inst)++ = (unsigned char)0x6e; \
+ x86_reg_emit ((inst), (dreg), (sreg)); \
+ } while (0)
+
+#define x86_movd_xreg_membase(inst,sreg,basereg,disp) \
+ do { \
+ x86_codegen_pre(&(inst), 3 + kMaxMembaseEmitPadding); \
+ *(inst)++ = (unsigned char)0x66; \
+ *(inst)++ = (unsigned char)0x0f; \
+ *(inst)++ = (unsigned char)0x6e; \
+ x86_membase_emit ((inst), (sreg), (basereg), (disp)); \
+ } while (0)
+
+#define x86_pshufw_reg_reg(inst,dreg,sreg,mask,high_words) \
+ do { \
+ x86_codegen_pre(&(inst), 5); \
+		*(inst)++ = (high_words) ? (unsigned char)0xF3 : (unsigned char)0xF2; \
+ *(inst)++ = (unsigned char)0x0f; \
+ *(inst)++ = (unsigned char)0x70; \
+ x86_reg_emit ((inst), (dreg), (sreg)); \
+ *(inst)++ = (unsigned char)mask; \
+ } while (0)
+
+#define x86_sse_shift_reg_imm(inst,opc,mode, dreg,imm) \
+ do { \
+ x86_codegen_pre(&(inst), 5); \
+ x86_sse_alu_pd_reg_reg (inst, opc, mode, dreg); \
+ x86_imm_emit8 ((inst), (imm)); \
+ } while (0)
+
+#define x86_sse_shift_reg_reg(inst,opc,dreg,sreg) \
+ do { \
+ x86_sse_alu_pd_reg_reg (inst, opc, dreg, sreg); \
+ } while (0)
+
+
+
+#endif // X86_H
+
diff --git a/lib/ffts/src/codegen.c b/lib/ffts/src/codegen.c
index a66ecda..c4e19e6 100644
--- a/lib/ffts/src/codegen.c
+++ b/lib/ffts/src/codegen.c
@@ -1,22 +1,22 @@
/*
-
+
This file is part of FFTS -- The Fastest Fourier Transform in the South
-
+
Copyright (c) 2012, Anthony M. Blake <amb@anthonix.com>
- Copyright (c) 2012, The University of Waikato
-
+ Copyright (c) 2012, The University of Waikato
+
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in the
- documentation and/or other materials provided with the distribution.
- * Neither the name of the organization nor the
- names of its contributors may be used to endorse or promote products
- derived from this software without specific prior written permission.
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+ * Neither the name of the organization nor the
+ names of its contributors may be used to endorse or promote products
+ derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
@@ -33,699 +33,581 @@
#include "codegen.h"
#include "macros.h"
-#include "ffts.h"
-#ifdef __APPLE__
- #include <libkern/OSCacheControl.h>
+#ifdef __arm__
+typedef uint32_t insns_t;
+#else
+typedef uint8_t insns_t;
#endif
-#include <sys/types.h>
-#include <sys/mman.h>
-
#ifdef HAVE_NEON
- #include "codegen_arm.h"
- #include "neon.h"
+#include "codegen_arm.h"
+#include "neon.h"
#elif HAVE_VFP
- #include "codegen_arm.h"
- #include "vfp.h"
+#include "vfp.h"
+#include "codegen_arm.h"
#else
- #include "codegen_sse.h"
- #include "macros-sse.h"
+#include "codegen_sse.h"
#endif
-#ifdef __ANDROID__
- #include <unistd.h>
+#include <assert.h>
+#include <errno.h>
+#include <stddef.h>
+/* #include <stdio.h> */
+
+#ifdef HAVE_STDLIB_H
+#include <stdlib.h>
#endif
-int tree_count(int N, int leafN, int offset) {
-
- if(N <= leafN) return 0;
- int count = 0;
- count += tree_count(N/4, leafN, offset);
- count += tree_count(N/8, leafN, offset + N/4);
- count += tree_count(N/8, leafN, offset + N/4 + N/8);
- count += tree_count(N/4, leafN, offset + N/2);
- count += tree_count(N/4, leafN, offset + 3*N/4);
-
- return 1 + count;
-}
+#ifdef HAVE_STRING_H
+#include <string.h>
+#endif
+
+#ifdef HAVE_UNISTD_H
+#include <unistd.h>
+#endif
+
+static int ffts_tree_count(int N, int leaf_N, int offset)
+{
+ int count;
-void elaborate_tree(size_t **p, int N, int leafN, int offset) {
-
- if(N <= leafN) return;
- elaborate_tree(p, N/4, leafN, offset);
- elaborate_tree(p, N/8, leafN, offset + N/4);
- elaborate_tree(p, N/8, leafN, offset + N/4 + N/8);
- elaborate_tree(p, N/4, leafN, offset + N/2);
- elaborate_tree(p, N/4, leafN, offset + 3*N/4);
+ if (N <= leaf_N) {
+ return 0;
+ }
- (*p)[0] = N;
- (*p)[1] = offset*2;
+ count = ffts_tree_count(N/4, leaf_N, offset);
+ count += ffts_tree_count(N/8, leaf_N, offset + N/4);
+ count += ffts_tree_count(N/8, leaf_N, offset + N/4 + N/8);
+ count += ffts_tree_count(N/4, leaf_N, offset + N/2);
+ count += ffts_tree_count(N/4, leaf_N, offset + 3*N/4);
- (*p)+=2;
+ return 1 + count;
}
+static void ffts_elaborate_tree(size_t **p, int N, int leaf_N, int offset)
+{
+ if (N <= leaf_N) {
+ return;
+ }
+ ffts_elaborate_tree(p, N/4, leaf_N, offset);
+ ffts_elaborate_tree(p, N/8, leaf_N, offset + N/4);
+ ffts_elaborate_tree(p, N/8, leaf_N, offset + N/4 + N/8);
+ ffts_elaborate_tree(p, N/4, leaf_N, offset + N/2);
+ ffts_elaborate_tree(p, N/4, leaf_N, offset + 3*N/4);
+ (*p)[0] = N;
+ (*p)[1] = 2 * offset;
-uint32_t LUT_offset(size_t N, size_t leafN) {
- int i;
- size_t p_lut_size = 0;
- size_t lut_size = 0;
- int hardcoded = 0;
- size_t n_luts = __builtin_ctzl(N/leafN);
- int n = leafN*2;
- //if(N <= 32) { n_luts = __builtin_ctzl(N/4); hardcoded = 1; }
-
- for(i=0;i<n_luts-1;i++) {
- p_lut_size = lut_size;
- if(!i || hardcoded) {
- #ifdef __arm__
- if(N <= 32) lut_size += n/4 * 2 * sizeof(cdata_t);
- else lut_size += n/4 * sizeof(cdata_t);
- #else
- lut_size += n/4 * 2 * sizeof(cdata_t);
- #endif
- // n *= 2;
- } else {
- #ifdef __arm__
- lut_size += n/8 * 3 * sizeof(cdata_t);
- #else
- lut_size += n/8 * 3 * 2 * sizeof(cdata_t);
- #endif
- }
- n *= 2;
- }
- return lut_size;
+ (*p) += 2;
}
-#ifdef __arm__
- typedef uint32_t insns_t;
-#else
- typedef uint8_t insns_t;
-#endif
+transform_func_t ffts_generate_func_code(ffts_plan_t *p, size_t N, size_t leaf_N, int sign)
+{
+ uint32_t offsets[8] = {0, 4*N, 2*N, 6*N, N, 5*N, 7*N, 3*N};
+ uint32_t offsets_o[8] = {0, 4*N, 2*N, 6*N, 7*N, 3*N, N, 5*N};
-#define P(x) (*(*p)++ = x)
-
-void insert_nops(uint8_t **p, uint32_t count) {
- switch(count) {
- case 0: break;
- case 2: P(0x66);
- case 1: P(0x90); break;
- case 3: P(0x0F); P(0x1F); P(0x00); break;
- case 4: P(0x0F); P(0x1F); P(0x40); P(0x00); break;
- case 5: P(0x0F); P(0x1F); P(0x44); P(0x00); P(0x00); break;
- case 6: P(0x66); P(0x0F); P(0x1F); P(0x44); P(0x00); P(0x00); break;
- case 7: P(0x0F); P(0x1F); P(0x80); P(0x00); P(0x00); P(0x00); P(0x00); break;
- case 8: P(0x0F); P(0x1F); P(0x84); P(0x00); P(0x00); P(0x00); P(0x00); P(0x00); break;
- case 9: P(0x66); P(0x0F); P(0x1F); P(0x84); P(0x00); P(0x00); P(0x00); P(0x00); P(0x00); break;
- default:
- P(0x66); P(0x0F); P(0x1F); P(0x84); P(0x00); P(0x00); P(0x00); P(0x00); P(0x00);
- insert_nops(p, count-9);
- break;
- }
-}
+ int32_t pAddr = 0;
+ int32_t pN = 0;
+ int32_t pLUT = 0;
+ insns_t *fp;
+ insns_t *start;
+ insns_t *x_4_addr;
+ insns_t *x_8_addr;
+ uint32_t loop_count;
-void align_mem16(uint8_t **p, uint32_t offset) {
-#ifdef __x86_64__
- int r = (16 - (offset & 0xf)) - ((uint32_t)(*p) & 0xf);
- r = (16 + r) & 0xf;
- insert_nops(p, r);
-#endif
-}
+ int count;
+ ptrdiff_t len;
-void ffts_generate_func_code(ffts_plan_t *p, size_t N, size_t leafN, int sign) {
- int count = tree_count(N, leafN, 0) + 1;
- size_t *ps = malloc(count * 2 * sizeof(size_t));
- size_t *pps = ps;
+ size_t *ps;
+ size_t *pps;
-#ifdef __x86_64__
- if(sign < 0) p->constants = sse_constants;
- else p->constants = sse_constants_inv;
-#endif
+ count = ffts_tree_count(N, leaf_N, 0) + 1;
- elaborate_tree(&pps, N, leafN, 0);
- pps[0] = 0;
- pps[1] = 0;
+ ps = pps = malloc(2 * count * sizeof(*ps));
+ if (!ps) {
+ return NULL;
+ }
- pps = ps;
+ ffts_elaborate_tree(&pps, N, leaf_N, 0);
-#ifdef __arm__
- if(N < 8192) p->transform_size = 8192;
- else p->transform_size = N;
-#else
- if(N < 2048) p->transform_size = 16384;
- else p->transform_size = 16384 + 2*N/8 * __builtin_ctzl(N);
-#endif
+ pps[0] = 0;
+ pps[1] = 0;
-#ifdef __APPLE__
- p->transform_base = mmap(NULL, p->transform_size, PROT_WRITE | PROT_READ, MAP_ANON | MAP_SHARED, -1, 0);
-#else
-#define MAP_ANONYMOUS 0x20
- p->transform_base = mmap(NULL, p->transform_size, PROT_WRITE | PROT_READ, MAP_ANONYMOUS | MAP_SHARED, -1, 0);
+ pps = ps;
+
+#ifdef HAVE_SSE
+ if (sign < 0) {
+ p->constants = sse_constants;
+ } else {
+ p->constants = sse_constants_inv;
+ }
#endif
-/*
- if(p->transform_base == MAP_FAILED) {
- fprintf(stderr, "MAP FAILED\n");
- exit(1);
- }*/
- insns_t *func = p->transform_base;//valloc(8192);
- insns_t *fp = func;
-
-//fprintf(stderr, "Allocating %d bytes \n", p->transform_size);
-//fprintf(stderr, "Base address = %016p\n", func);
-
- if(!func) {
- fprintf(stderr, "NOMEM\n");
- exit(1);
- }
-
- insns_t *x_8_addr = fp;
+ fp = (insns_t*) p->transform_base;
+
+ /* generate base cases */
+ x_4_addr = generate_size4_base_case(&fp, sign);
+ x_8_addr = generate_size8_base_case(&fp, sign);
+
#ifdef __arm__
+ start = generate_prologue(&fp, p);
+
#ifdef HAVE_NEON
- memcpy(fp, neon_x8, neon_x8_t - neon_x8);
- /*
- * Changes adds to subtracts and vice versa to allow the computation
- * of both the IFFT and FFT
- */
- if(sign < 0) {
- fp[31] ^= 0x00200000; fp[32] ^= 0x00200000; fp[33] ^= 0x00200000; fp[34] ^= 0x00200000;
- fp[65] ^= 0x00200000; fp[66] ^= 0x00200000; fp[70] ^= 0x00200000; fp[74] ^= 0x00200000;
- fp[97] ^= 0x00200000; fp[98] ^= 0x00200000; fp[102] ^= 0x00200000; fp[104] ^= 0x00200000;
- }
- fp += (neon_x8_t - neon_x8) / 4;
+ memcpy(fp, neon_ee, neon_oo - neon_ee);
+ if (sign < 0) {
+ fp[33] ^= 0x00200000;
+ fp[37] ^= 0x00200000;
+ fp[38] ^= 0x00200000;
+ fp[39] ^= 0x00200000;
+ fp[40] ^= 0x00200000;
+ fp[41] ^= 0x00200000;
+ fp[44] ^= 0x00200000;
+ fp[45] ^= 0x00200000;
+ fp[46] ^= 0x00200000;
+ fp[47] ^= 0x00200000;
+ fp[48] ^= 0x00200000;
+ fp[57] ^= 0x00200000;
+ }
+
+ fp += (neon_oo - neon_ee) / 4;
#else
- memcpy(fp, vfp_x8, vfp_end - vfp_x8);
- if(sign > 0) {
- fp[65] ^= 0x00000040;
- fp[66] ^= 0x00000040;
- fp[68] ^= 0x00000040;
- fp[70] ^= 0x00000040;
- fp[103] ^= 0x00000040;
- fp[104] ^= 0x00000040;
- fp[105] ^= 0x00000040;
- fp[108] ^= 0x00000040;
- fp[113] ^= 0x00000040;
- fp[114] ^= 0x00000040;
- fp[117] ^= 0x00000040;
- fp[118] ^= 0x00000040;
- }
- fp += (vfp_end - vfp_x8) / 4;
+ memcpy(fp, vfp_e, vfp_o - vfp_e);
+
+ if (sign > 0) {
+ fp[64] ^= 0x00000040;
+ fp[65] ^= 0x00000040;
+ fp[68] ^= 0x00000040;
+ fp[75] ^= 0x00000040;
+ fp[76] ^= 0x00000040;
+ fp[79] ^= 0x00000040;
+ fp[80] ^= 0x00000040;
+ fp[83] ^= 0x00000040;
+ fp[84] ^= 0x00000040;
+ fp[87] ^= 0x00000040;
+ fp[91] ^= 0x00000040;
+ fp[93] ^= 0x00000040;
+ }
+ fp += (vfp_o - vfp_e) / 4;
#endif
#else
- align_mem16(&fp, 0);
- x_8_addr = fp;
- align_mem16(&fp, 5);
- memcpy(fp, x8_soft, x8_hard - x8_soft);
- fp += (x8_hard - x8_soft);
-//fprintf(stderr, "X8 start address = %016p\n", x_8_addr);
-#endif
-//uint32_t *x_8_t_addr = fp;
-//memcpy(fp, neon_x8_t, neon_end - neon_x8_t);
-//fp += (neon_end - neon_x8_t) / 4;
- insns_t *x_4_addr = fp;
-#ifdef __arm__
- #ifdef HAVE_NEON
- memcpy(fp, neon_x4, neon_x8 - neon_x4);
- if(sign < 0) {
- fp[26] ^= 0x00200000; fp[28] ^= 0x00200000; fp[31] ^= 0x00200000; fp[32] ^= 0x00200000;
- }
- fp += (neon_x8 - neon_x4) / 4;
- #else
- memcpy(fp, vfp_x4, vfp_x8 - vfp_x4);
- if(sign > 0) {
- fp[36] ^= 0x00000040;
- fp[38] ^= 0x00000040;
- fp[43] ^= 0x00000040;
- fp[44] ^= 0x00000040;
- }
- fp += (vfp_x8 - vfp_x4) / 4;
- #endif
-#else
- align_mem16(&fp, 0);
- x_4_addr = fp;
- memcpy(fp, x4, x8_soft - x4);
- fp += (x8_soft - x4);
+ /* generate functions */
+ start = generate_prologue(&fp, p);
-#endif
- insns_t *start = fp;
-
-#ifdef __arm__
- *fp = PUSH_LR(); fp++;
- *fp = 0xed2d8b10; fp++;
-
- ADDI(&fp, 3, 1, 0);
- ADDI(&fp, 7, 1, N);
- ADDI(&fp, 5, 1, 2*N);
- ADDI(&fp, 10, 7, 2*N);
- ADDI(&fp, 4, 5, 2*N);
- ADDI(&fp, 8, 10, 2*N);
- ADDI(&fp, 6, 4, 2*N);
- ADDI(&fp, 9, 8, 2*N);
-
- *fp = LDRI(12, 0, ((uint32_t)&p->offsets) - ((uint32_t)p)); fp++; // load offsets into r12
-// *fp++ = LDRI(1, 0, 4); // load ws into r1
- ADDI(&fp, 1, 0, 0);
-
- ADDI(&fp, 0, 2, 0), // mov out into r0
+ loop_count = 4 * p->i0;
+ generate_leaf_init(&fp, loop_count);
+
+ if (ffts_ctzl(N) & 1) {
+ generate_leaf_ee(&fp, offsets, p->i1 ? 6 : 0);
+
+ if (p->i1) {
+ loop_count += 4 * p->i1;
+ generate_leaf_oo(&fp, loop_count, offsets_o, 7);
+ }
+
+ loop_count += 4;
+ generate_leaf_oe(&fp, offsets_o);
+ } else {
+ generate_leaf_ee(&fp, offsets, N >= 256 ? 2 : 8);
+
+ loop_count += 4;
+ generate_leaf_eo(&fp, offsets);
+
+ if (p->i1) {
+ loop_count += 4 * p->i1;
+ generate_leaf_oo(&fp, loop_count, offsets_o, N >= 256 ? 4 : 7);
+ }
+ }
+
+ if (p->i1) {
+ uint32_t offsets_oe[8] = {7*N, 3*N, N, 5*N, 0, 4*N, 6*N, 2*N};
+
+ loop_count += 4 * p->i1;
+
+ /* align loop/jump destination */
+#ifdef _M_X64
+ x86_mov_reg_imm(fp, X86_EBX, loop_count);
+#else
+ x86_mov_reg_imm(fp, X86_ECX, loop_count);
+ ffts_align_mem16(&fp, 9);
#endif
+ generate_leaf_ee(&fp, offsets_oe, 0);
+ }
-#ifdef __arm__
- *fp = LDRI(2, 1, ((uint32_t)&p->ee_ws) - ((uint32_t)p)); fp++;
- #ifdef HAVE_NEON
- MOVI(&fp, 11, p->i0);
- #else
- MOVI(&fp, 11, p->i0);
- #endif
+ generate_transform_init(&fp);
+ /* generate subtransform calls */
+ count = 2;
+ while (pps[0]) {
+ size_t ws_is;
+
+ if (!pN) {
+#ifdef _M_X64
+ x86_mov_reg_imm(fp, X86_EBX, pps[0]);
#else
- align_mem16(&fp, 0);
- start = fp;
-
- *fp++ = 0x4c;
- *fp++ = 0x8b;
- *fp++ = 0x07;
- uint32_t lp_cnt = p->i0 * 4;
- MOVI(&fp, RCX, lp_cnt);
-
- //LEA(&fp, R8, RDI, ((uint32_t)&p->offsets) - ((uint32_t)p));
+ x86_mov_reg_imm(fp, X86_ECX, pps[0] / 4);
#endif
- //fp++;
-#ifdef __arm__
-#ifdef HAVE_NEON
- memcpy(fp, neon_ee, neon_oo - neon_ee);
- if(sign < 0) {
- fp[33] ^= 0x00200000; fp[37] ^= 0x00200000; fp[38] ^= 0x00200000; fp[39] ^= 0x00200000;
- fp[40] ^= 0x00200000; fp[41] ^= 0x00200000; fp[44] ^= 0x00200000; fp[45] ^= 0x00200000;
- fp[46] ^= 0x00200000; fp[47] ^= 0x00200000; fp[48] ^= 0x00200000; fp[57] ^= 0x00200000;
- }
- fp += (neon_oo - neon_ee) / 4;
+ } else {
+ int offset = (4 * pps[1]) - pAddr;
+ if (offset) {
+#ifdef _M_X64
+ x64_alu_reg_imm_size(fp, X86_ADD, X64_R8, offset, 8);
#else
- memcpy(fp, vfp_e, vfp_o - vfp_e);
- if(sign > 0) {
- fp[64] ^= 0x00000040; fp[65] ^= 0x00000040; fp[68] ^= 0x00000040; fp[75] ^= 0x00000040;
- fp[76] ^= 0x00000040; fp[79] ^= 0x00000040; fp[80] ^= 0x00000040; fp[83] ^= 0x00000040;
- fp[84] ^= 0x00000040; fp[87] ^= 0x00000040; fp[91] ^= 0x00000040; fp[93] ^= 0x00000040;
- }
- fp += (vfp_o - vfp_e) / 4;
+ x64_alu_reg_imm_size(fp, X86_ADD, X64_RDX, offset, 8);
#endif
+ }
+
+ if (pps[0] > leaf_N && pps[0] - pN) {
+ int factor = ffts_ctzl(pps[0]) - ffts_ctzl(pN);
+
+#ifdef _M_X64
+ if (factor > 0) {
+ x86_shift_reg_imm(fp, X86_SHL, X86_EBX, factor);
+ } else {
+ x86_shift_reg_imm(fp, X86_SHR, X86_EBX, -factor);
+ }
#else
-//fprintf(stderr, "Body start address = %016p\n", start);
-
- PUSH(&fp, RBP);
- PUSH(&fp, RBX);
- PUSH(&fp, R10);
- PUSH(&fp, R11);
- PUSH(&fp, R12);
- PUSH(&fp, R13);
- PUSH(&fp, R14);
- PUSH(&fp, R15);
-
- int i;
- memcpy(fp, leaf_ee_init, leaf_ee - leaf_ee_init);
-
-//fprintf(stderr, "Leaf ee init address = %016p\n", leaf_ee_init);
-//fprintf(stderr, "Constants address = %016p\n", sse_constants);
-//fprintf(stderr, "Constants address = %016p\n", p->constants);
-
-//int32_t val = READ_IMM32(fp + 3);
-//fprintf(stderr, "diff = 0x%x\n", ((uint32_t)&p->constants) - ((uint32_t)p));
-
-//int64_t v2 = val + (int64_t)((void *)leaf_ee_init - (void *)fp );
-//fprintf(stderr, "IMM = 0x%llx\n", v2);
-
-//IMM32_NI(fp + 3, ((int64_t) READ_IMM32(fp + 3)) + ((void *)leaf_ee_init - (void *)fp ));
- fp += (leaf_ee - leaf_ee_init);
-
-//fprintf(stderr, "Leaf start address = %016p\n", fp);
- align_mem16(&fp, 9);
- memcpy(fp, leaf_ee, leaf_oo - leaf_ee);
-
-
- uint32_t offsets[8] = {0, N, N/2, 3*N/2, N/4, 5*N/4, 7*N/4, 3*N/4};
- uint32_t offsets_o[8] = {0, N, N/2, 3*N/2, 7*N/4, 3*N/4, N/4, 5*N/4};
- uint32_t offsets_oe[8] = {7*N/4, 3*N/4, N/4, 5*N/4, 0, N, 3*N/2, N/2};
-
- for(i=0;i<8;i++) IMM32_NI(fp + sse_leaf_ee_offsets[i], offsets[i]*4);
-
- fp += (leaf_oo - leaf_ee);
-
- if(__builtin_ctzl(N) & 1){
-
- if(p->i1) {
- lp_cnt += p->i1 * 4;
- MOVI(&fp, RCX, lp_cnt);
- align_mem16(&fp, 4);
- memcpy(fp, leaf_oo, leaf_eo - leaf_oo);
- for(i=0;i<8;i++) IMM32_NI(fp + sse_leaf_oo_offsets[i], offsets_o[i]*4);
- fp += (leaf_eo - leaf_oo);
- }
-
-
- memcpy(fp, leaf_oe, leaf_end - leaf_oe);
- lp_cnt += 4;
- for(i=0;i<8;i++) IMM32_NI(fp + sse_leaf_oe_offsets[i], offsets_o[i]*4);
- fp += (leaf_end - leaf_oe);
-
- }else{
-
-
- memcpy(fp, leaf_eo, leaf_oe - leaf_eo);
- lp_cnt += 4;
- for(i=0;i<8;i++) IMM32_NI(fp + sse_leaf_eo_offsets[i], offsets[i]*4);
- fp += (leaf_oe - leaf_eo);
-
- if(p->i1) {
- lp_cnt += p->i1 * 4;
- MOVI(&fp, RCX, lp_cnt);
- align_mem16(&fp, 4);
- memcpy(fp, leaf_oo, leaf_eo - leaf_oo);
- for(i=0;i<8;i++) IMM32_NI(fp + sse_leaf_oo_offsets[i], offsets_o[i]*4);
- fp += (leaf_eo - leaf_oo);
- }
-
- }
- if(p->i1) {
- lp_cnt += p->i1 * 4;
- MOVI(&fp, RCX, lp_cnt);
- align_mem16(&fp, 9);
- memcpy(fp, leaf_ee, leaf_oo - leaf_ee);
- for(i=0;i<8;i++) IMM32_NI(fp + sse_leaf_ee_offsets[i], offsets_oe[i]*4);
- fp += (leaf_oo - leaf_ee);
-
- }
-
-//fprintf(stderr, "Body start address = %016p\n", fp);
- //LEA(&fp, R8, RDI, ((uint32_t)&p->ws) - ((uint32_t)p));
- memcpy(fp, x_init, x4 - x_init);
-//IMM32_NI(fp + 3, ((int64_t)READ_IMM32(fp + 3)) + ((void *)x_init - (void *)fp ));
- fp += (x4 - x_init);
-
- int32_t pAddr = 0;
- int32_t pN = 0;
- int32_t pLUT = 0;
- count = 2;
- while(pps[0]) {
-
- if(!pN) {
- MOVI(&fp, RCX, pps[0] / 4);
- }else{
- if((pps[1]*4)-pAddr) ADDI(&fp, RDX, (pps[1] * 4)- pAddr);
- if(pps[0] > leafN && pps[0] - pN) {
-
- int diff = __builtin_ctzl(pps[0]) - __builtin_ctzl(pN);
- *fp++ = 0xc1;
-
- if(diff > 0) {
- *fp++ = 0xe1;
- *fp++ = (diff & 0xff);
- }else{
- *fp++ = 0xe9;
- *fp++ = ((-diff) & 0xff);
- }
- }
- }
-
- if(p->ws_is[__builtin_ctzl(pps[0]/leafN)-1]*8 - pLUT)
- ADDI(&fp, R8, p->ws_is[__builtin_ctzl(pps[0]/leafN)-1]*8 - pLUT);
-
-
- if(pps[0] == 2*leafN) {
- CALL(&fp, x_4_addr);
- // }else if(!pps[2]){
- // //uint32_t *x_8_t_addr = fp;
- // memcpy(fp, neon_x8_t, neon_ee - neon_x8_t);
- // fp += (neon_ee - neon_x8_t) / 4;
- // //*fp++ = BL(fp+2, x_8_t_addr);
- }else{
- CALL(&fp, x_8_addr);
- }
-
- pAddr = pps[1] * 4;
- if(pps[0] > leafN)
- pN = pps[0];
- pLUT = p->ws_is[__builtin_ctzl(pps[0]/leafN)-1]*8;//LUT_offset(pps[0], leafN);
-// fprintf(stderr, "LUT offset for %d is %d\n", pN, pLUT);
- count += 4;
- pps += 2;
- }
+ if (factor > 0) {
+ x86_shift_reg_imm(fp, X86_SHL, X86_ECX, factor);
+ } else {
+ x86_shift_reg_imm(fp, X86_SHR, X86_ECX, -factor);
+ }
+#endif
+ }
+ }
+
+ ws_is = 8 * p->ws_is[ffts_ctzl(pps[0] / leaf_N) - 1];
+ if (ws_is != pLUT) {
+ int offset = (int) (ws_is - pLUT);
+
+#ifdef _M_X64
+ x64_alu_reg_imm_size(fp, X86_ADD, X64_R9, offset, 8);
+#else
+ x64_alu_reg_imm_size(fp, X86_ADD, X64_R8, offset, 8);
+#endif
+ }
+
+ if (pps[0] == 2 * leaf_N) {
+ x64_call_code(fp, x_4_addr);
+ } else {
+ x64_call_code(fp, x_8_addr);
+ }
+
+ pAddr = 4 * pps[1];
+ if (pps[0] > leaf_N) {
+ pN = pps[0];
+ }
+
+		pLUT = ws_is; /* LUT_offset(pps[0], leaf_N) */
+ //fprintf(stderr, "LUT offset for %d is %d\n", pN, pLUT);
+ count += 4;
+ pps += 2;
+ }
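+
+	/*
+	 * NOTE: illustrative only. The loop above emits the moral equivalent
+	 * of the following interpreter, with the output/LUT pointer updates
+	 * delta-encoded as ADDs and the loop count preloaded into EBX (Win64)
+	 * or ECX (SysV); x_4/x_8 stand for the base cases generated earlier:
+	 *
+	 *   while (pps[0]) {
+	 *       float *dst = out + 4 * pps[1];
+	 *       float *lut = ws + 8 * p->ws_is[ffts_ctzl(pps[0] / leaf_N) - 1];
+	 *       if (pps[0] == 2 * leaf_N)
+	 *           x_4(dst, pps[0], lut);
+	 *       else
+	 *           x_8(dst, pps[0], lut);
+	 *       pps += 2;
+	 *   }
+	 */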
#endif
+
#ifdef __arm__
#ifdef HAVE_NEON
- if(__builtin_ctzl(N) & 1){
- ADDI(&fp, 2, 7, 0);
- ADDI(&fp, 7, 9, 0);
- ADDI(&fp, 9, 2, 0);
-
- ADDI(&fp, 2, 8, 0);
- ADDI(&fp, 8, 10, 0);
- ADDI(&fp, 10, 2, 0);
-
- if(p->i1) {
- MOVI(&fp, 11, p->i1);
- memcpy(fp, neon_oo, neon_eo - neon_oo);
- if(sign < 0) {
- fp[12] ^= 0x00200000; fp[13] ^= 0x00200000; fp[14] ^= 0x00200000; fp[15] ^= 0x00200000;
- fp[27] ^= 0x00200000; fp[29] ^= 0x00200000; fp[30] ^= 0x00200000; fp[31] ^= 0x00200000;
- fp[46] ^= 0x00200000; fp[47] ^= 0x00200000; fp[48] ^= 0x00200000; fp[57] ^= 0x00200000;
- }
- fp += (neon_eo - neon_oo) / 4;
- }
-
- *fp = LDRI(11, 1, ((uint32_t)&p->oe_ws) - ((uint32_t)p)); fp++;
-
- memcpy(fp, neon_oe, neon_end - neon_oe);
- if(sign < 0) {
- fp[19] ^= 0x00200000; fp[20] ^= 0x00200000; fp[22] ^= 0x00200000; fp[23] ^= 0x00200000;
- fp[37] ^= 0x00200000; fp[38] ^= 0x00200000; fp[40] ^= 0x00200000; fp[41] ^= 0x00200000;
- fp[64] ^= 0x00200000; fp[65] ^= 0x00200000; fp[66] ^= 0x00200000; fp[67] ^= 0x00200000;
- }
- fp += (neon_end - neon_oe) / 4;
-
- }else{
-
- *fp = LDRI(11, 1, ((uint32_t)&p->eo_ws) - ((uint32_t)p)); fp++;
-
- memcpy(fp, neon_eo, neon_oe - neon_eo);
- if(sign < 0) {
- fp[10] ^= 0x00200000; fp[11] ^= 0x00200000; fp[13] ^= 0x00200000; fp[14] ^= 0x00200000;
- fp[31] ^= 0x00200000; fp[33] ^= 0x00200000; fp[34] ^= 0x00200000; fp[35] ^= 0x00200000;
- fp[59] ^= 0x00200000; fp[60] ^= 0x00200000; fp[61] ^= 0x00200000; fp[62] ^= 0x00200000;
- }
- fp += (neon_oe - neon_eo) / 4;
-
- ADDI(&fp, 2, 7, 0);
- ADDI(&fp, 7, 9, 0);
- ADDI(&fp, 9, 2, 0);
-
- ADDI(&fp, 2, 8, 0);
- ADDI(&fp, 8, 10, 0);
- ADDI(&fp, 10, 2, 0);
-
- if(p->i1) {
- MOVI(&fp, 11, p->i1);
- memcpy(fp, neon_oo, neon_eo - neon_oo);
- if(sign < 0) {
- fp[12] ^= 0x00200000; fp[13] ^= 0x00200000; fp[14] ^= 0x00200000; fp[15] ^= 0x00200000;
- fp[27] ^= 0x00200000; fp[29] ^= 0x00200000; fp[30] ^= 0x00200000; fp[31] ^= 0x00200000;
- fp[46] ^= 0x00200000; fp[47] ^= 0x00200000; fp[48] ^= 0x00200000; fp[57] ^= 0x00200000;
- }
- fp += (neon_eo - neon_oo) / 4;
- }
-
- }
-
-
- if(p->i1) {
- ADDI(&fp, 2, 3, 0);
- ADDI(&fp, 3, 7, 0);
- ADDI(&fp, 7, 2, 0);
-
- ADDI(&fp, 2, 4, 0);
- ADDI(&fp, 4, 8, 0);
- ADDI(&fp, 8, 2, 0);
-
- ADDI(&fp, 2, 5, 0);
- ADDI(&fp, 5, 9, 0);
- ADDI(&fp, 9, 2, 0);
-
- ADDI(&fp, 2, 6, 0);
- ADDI(&fp, 6, 10, 0);
- ADDI(&fp, 10, 2, 0);
-
- ADDI(&fp, 2, 9, 0);
- ADDI(&fp, 9, 10, 0);
- ADDI(&fp, 10, 2, 0);
-
- *fp = LDRI(2, 1, ((uint32_t)&p->ee_ws) - ((uint32_t)p)); fp++;
- MOVI(&fp, 11, p->i1);
- memcpy(fp, neon_ee, neon_oo - neon_ee);
- if(sign < 0) {
- fp[33] ^= 0x00200000; fp[37] ^= 0x00200000; fp[38] ^= 0x00200000; fp[39] ^= 0x00200000;
- fp[40] ^= 0x00200000; fp[41] ^= 0x00200000; fp[44] ^= 0x00200000; fp[45] ^= 0x00200000;
- fp[46] ^= 0x00200000; fp[47] ^= 0x00200000; fp[48] ^= 0x00200000; fp[57] ^= 0x00200000;
- }
- fp += (neon_oo - neon_ee) / 4;
-
- }
+ if (ffts_ctzl(N) & 1) {
+ ADDI(&fp, 2, 7, 0);
+ ADDI(&fp, 7, 9, 0);
+ ADDI(&fp, 9, 2, 0);
+
+ ADDI(&fp, 2, 8, 0);
+ ADDI(&fp, 8, 10, 0);
+ ADDI(&fp, 10, 2, 0);
+
+ if(p->i1) {
+ MOVI(&fp, 11, p->i1);
+ memcpy(fp, neon_oo, neon_eo - neon_oo);
+ if(sign < 0) {
+ fp[12] ^= 0x00200000;
+ fp[13] ^= 0x00200000;
+ fp[14] ^= 0x00200000;
+ fp[15] ^= 0x00200000;
+ fp[27] ^= 0x00200000;
+ fp[29] ^= 0x00200000;
+ fp[30] ^= 0x00200000;
+ fp[31] ^= 0x00200000;
+ fp[46] ^= 0x00200000;
+ fp[47] ^= 0x00200000;
+ fp[48] ^= 0x00200000;
+ fp[57] ^= 0x00200000;
+ }
+ fp += (neon_eo - neon_oo) / 4;
+ }
+
+ *fp = LDRI(11, 1, ((uint32_t)&p->oe_ws) - ((uint32_t)p));
+ fp++;
+
+ memcpy(fp, neon_oe, neon_end - neon_oe);
+ if(sign < 0) {
+ fp[19] ^= 0x00200000;
+ fp[20] ^= 0x00200000;
+ fp[22] ^= 0x00200000;
+ fp[23] ^= 0x00200000;
+ fp[37] ^= 0x00200000;
+ fp[38] ^= 0x00200000;
+ fp[40] ^= 0x00200000;
+ fp[41] ^= 0x00200000;
+ fp[64] ^= 0x00200000;
+ fp[65] ^= 0x00200000;
+ fp[66] ^= 0x00200000;
+ fp[67] ^= 0x00200000;
+ }
+ fp += (neon_end - neon_oe) / 4;
+
+ } else {
+
+ *fp = LDRI(11, 1, ((uint32_t)&p->eo_ws) - ((uint32_t)p));
+ fp++;
+
+ memcpy(fp, neon_eo, neon_oe - neon_eo);
+ if(sign < 0) {
+ fp[10] ^= 0x00200000;
+ fp[11] ^= 0x00200000;
+ fp[13] ^= 0x00200000;
+ fp[14] ^= 0x00200000;
+ fp[31] ^= 0x00200000;
+ fp[33] ^= 0x00200000;
+ fp[34] ^= 0x00200000;
+ fp[35] ^= 0x00200000;
+ fp[59] ^= 0x00200000;
+ fp[60] ^= 0x00200000;
+ fp[61] ^= 0x00200000;
+ fp[62] ^= 0x00200000;
+ }
+ fp += (neon_oe - neon_eo) / 4;
+
+ ADDI(&fp, 2, 7, 0);
+ ADDI(&fp, 7, 9, 0);
+ ADDI(&fp, 9, 2, 0);
+
+ ADDI(&fp, 2, 8, 0);
+ ADDI(&fp, 8, 10, 0);
+ ADDI(&fp, 10, 2, 0);
+
+ if(p->i1) {
+ MOVI(&fp, 11, p->i1);
+ memcpy(fp, neon_oo, neon_eo - neon_oo);
+ if(sign < 0) {
+ fp[12] ^= 0x00200000;
+ fp[13] ^= 0x00200000;
+ fp[14] ^= 0x00200000;
+ fp[15] ^= 0x00200000;
+ fp[27] ^= 0x00200000;
+ fp[29] ^= 0x00200000;
+ fp[30] ^= 0x00200000;
+ fp[31] ^= 0x00200000;
+ fp[46] ^= 0x00200000;
+ fp[47] ^= 0x00200000;
+ fp[48] ^= 0x00200000;
+ fp[57] ^= 0x00200000;
+ }
+ fp += (neon_eo - neon_oo) / 4;
+ }
+ }
+
+ if(p->i1) {
+ ADDI(&fp, 2, 3, 0);
+ ADDI(&fp, 3, 7, 0);
+ ADDI(&fp, 7, 2, 0);
+
+ ADDI(&fp, 2, 4, 0);
+ ADDI(&fp, 4, 8, 0);
+ ADDI(&fp, 8, 2, 0);
+
+ ADDI(&fp, 2, 5, 0);
+ ADDI(&fp, 5, 9, 0);
+ ADDI(&fp, 9, 2, 0);
+
+ ADDI(&fp, 2, 6, 0);
+ ADDI(&fp, 6, 10, 0);
+ ADDI(&fp, 10, 2, 0);
+
+ ADDI(&fp, 2, 9, 0);
+ ADDI(&fp, 9, 10, 0);
+ ADDI(&fp, 10, 2, 0);
+
+ *fp = LDRI(2, 1, ((uint32_t)&p->ee_ws) - ((uint32_t)p));
+ fp++;
+ MOVI(&fp, 11, p->i1);
+ memcpy(fp, neon_ee, neon_oo - neon_ee);
+ if(sign < 0) {
+ fp[33] ^= 0x00200000;
+ fp[37] ^= 0x00200000;
+ fp[38] ^= 0x00200000;
+ fp[39] ^= 0x00200000;
+ fp[40] ^= 0x00200000;
+ fp[41] ^= 0x00200000;
+ fp[44] ^= 0x00200000;
+ fp[45] ^= 0x00200000;
+ fp[46] ^= 0x00200000;
+ fp[47] ^= 0x00200000;
+ fp[48] ^= 0x00200000;
+ fp[57] ^= 0x00200000;
+ }
+ fp += (neon_oo - neon_ee) / 4;
+ }
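+
+	/*
+	 * NOTE: the 0x00200000 XORs above flip bit 21 of the copied NEON
+	 * opcodes, changing selected adds into subtracts (and vice versa) so
+	 * that one code template serves both transform directions. A minimal
+	 * sketch of the pattern, with `idx` assumed to list the affected
+	 * instruction words:
+	 *
+	 *   static void flip_neon_addsub(uint32_t *code, const int *idx, int n)
+	 *   {
+	 *       int i;
+	 *       for (i = 0; i < n; i++)
+	 *           code[idx[i]] ^= 0x00200000; // toggle add/sub variant
+	 *   }
+	 *
+	 * The VFP branch below uses the same trick with bit 6 (0x00000040).
+	 */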
#else
- ADDI(&fp, 2, 7, 0);
- ADDI(&fp, 7, 9, 0);
- ADDI(&fp, 9, 2, 0);
-
- ADDI(&fp, 2, 8, 0);
- ADDI(&fp, 8, 10, 0);
- ADDI(&fp, 10, 2, 0);
-
- MOVI(&fp, 11, (p->i1>0) ? p->i1 : 1);
- memcpy(fp, vfp_o, vfp_x4 - vfp_o);
- if(sign > 0) {
- fp[22] ^= 0x00000040; fp[24] ^= 0x00000040; fp[25] ^= 0x00000040; fp[26] ^= 0x00000040;
- fp[62] ^= 0x00000040; fp[64] ^= 0x00000040; fp[65] ^= 0x00000040; fp[66] ^= 0x00000040;
- }
- fp += (vfp_x4 - vfp_o) / 4;
-
- ADDI(&fp, 2, 3, 0);
- ADDI(&fp, 3, 7, 0);
- ADDI(&fp, 7, 2, 0);
-
- ADDI(&fp, 2, 4, 0);
- ADDI(&fp, 4, 8, 0);
- ADDI(&fp, 8, 2, 0);
-
- ADDI(&fp, 2, 5, 0);
- ADDI(&fp, 5, 9, 0);
- ADDI(&fp, 9, 2, 0);
-
- ADDI(&fp, 2, 6, 0);
- ADDI(&fp, 6, 10, 0);
- ADDI(&fp, 10, 2, 0);
-
- ADDI(&fp, 2, 9, 0);
- ADDI(&fp, 9, 10, 0);
- ADDI(&fp, 10, 2, 0);
-
- *fp = LDRI(2, 1, ((uint32_t)&p->ee_ws) - ((uint32_t)p)); fp++;
- MOVI(&fp, 11, (p->i2>0) ? p->i2 : 1);
- memcpy(fp, vfp_e, vfp_o - vfp_e);
- if(sign > 0) {
- fp[64] ^= 0x00000040; fp[65] ^= 0x00000040; fp[68] ^= 0x00000040; fp[75] ^= 0x00000040;
- fp[76] ^= 0x00000040; fp[79] ^= 0x00000040; fp[80] ^= 0x00000040; fp[83] ^= 0x00000040;
- fp[84] ^= 0x00000040; fp[87] ^= 0x00000040; fp[91] ^= 0x00000040; fp[93] ^= 0x00000040;
- }
- fp += (vfp_o - vfp_e) / 4;
+ ADDI(&fp, 2, 7, 0);
+ ADDI(&fp, 7, 9, 0);
+ ADDI(&fp, 9, 2, 0);
+
+ ADDI(&fp, 2, 8, 0);
+ ADDI(&fp, 8, 10, 0);
+ ADDI(&fp, 10, 2, 0);
+
+ MOVI(&fp, 11, (p->i1>0) ? p->i1 : 1);
+ memcpy(fp, vfp_o, vfp_x4 - vfp_o);
+ if(sign > 0) {
+ fp[22] ^= 0x00000040;
+ fp[24] ^= 0x00000040;
+ fp[25] ^= 0x00000040;
+ fp[26] ^= 0x00000040;
+ fp[62] ^= 0x00000040;
+ fp[64] ^= 0x00000040;
+ fp[65] ^= 0x00000040;
+ fp[66] ^= 0x00000040;
+ }
+ fp += (vfp_x4 - vfp_o) / 4;
+
+ ADDI(&fp, 2, 3, 0);
+ ADDI(&fp, 3, 7, 0);
+ ADDI(&fp, 7, 2, 0);
+
+ ADDI(&fp, 2, 4, 0);
+ ADDI(&fp, 4, 8, 0);
+ ADDI(&fp, 8, 2, 0);
+
+ ADDI(&fp, 2, 5, 0);
+ ADDI(&fp, 5, 9, 0);
+ ADDI(&fp, 9, 2, 0);
+
+ ADDI(&fp, 2, 6, 0);
+ ADDI(&fp, 6, 10, 0);
+ ADDI(&fp, 10, 2, 0);
+
+ ADDI(&fp, 2, 9, 0);
+ ADDI(&fp, 9, 10, 0);
+ ADDI(&fp, 10, 2, 0);
+
+ *fp = LDRI(2, 1, ((uint32_t)&p->ee_ws) - ((uint32_t)p));
+ fp++;
+ MOVI(&fp, 11, (p->i2>0) ? p->i2 : 1);
+ memcpy(fp, vfp_e, vfp_o - vfp_e);
+ if(sign > 0) {
+ fp[64] ^= 0x00000040;
+ fp[65] ^= 0x00000040;
+ fp[68] ^= 0x00000040;
+ fp[75] ^= 0x00000040;
+ fp[76] ^= 0x00000040;
+ fp[79] ^= 0x00000040;
+ fp[80] ^= 0x00000040;
+ fp[83] ^= 0x00000040;
+ fp[84] ^= 0x00000040;
+ fp[87] ^= 0x00000040;
+ fp[91] ^= 0x00000040;
+ fp[93] ^= 0x00000040;
+ }
+ fp += (vfp_o - vfp_e) / 4;
#endif
- *fp = LDRI(2, 1, ((uint32_t)&p->ws) - ((uint32_t)p)); fp++; // load offsets into r12
- //ADDI(&fp, 2, 1, 0);
- MOVI(&fp, 1, 0);
-
- // args: r0 - out
- // r1 - N
- // r2 - ws
-// ADDI(&fp, 3, 1, 0); // put N into r3 for counter
-
- int32_t pAddr = 0;
- int32_t pN = 0;
- int32_t pLUT = 0;
- count = 2;
- while(pps[0]) {
-
-// fprintf(stderr, "size %zu at %zu - diff %zu\n", pps[0], pps[1]*4, (pps[1]*4) - pAddr);
- if(!pN) {
- MOVI(&fp, 1, pps[0]);
- }else{
- if((pps[1]*4)-pAddr) ADDI(&fp, 0, 0, (pps[1] * 4)- pAddr);
- if(pps[0] - pN) ADDI(&fp, 1, 1, pps[0] - pN);
- }
-
- if(p->ws_is[__builtin_ctzl(pps[0]/leafN)-1]*8 - pLUT)
- ADDI(&fp, 2, 2, p->ws_is[__builtin_ctzl(pps[0]/leafN)-1]*8 - pLUT);
-
-
- if(pps[0] == 2*leafN) {
- *fp = BL(fp+2, x_4_addr); fp++;
- }else if(!pps[2]){
- //uint32_t *x_8_t_addr = fp;
+ *fp = LDRI(2, 1, ((uint32_t)&p->ws) - ((uint32_t)p));
+	fp++; // load ws into r2
+ //ADDI(&fp, 2, 1, 0);
+ MOVI(&fp, 1, 0);
+
+ // args: r0 - out
+ // r1 - N
+ // r2 - ws
+ // ADDI(&fp, 3, 1, 0); // put N into r3 for counter
+
+ count = 2;
+ while(pps[0]) {
+
+ // fprintf(stderr, "size %zu at %zu - diff %zu\n", pps[0], pps[1]*4, (pps[1]*4) - pAddr);
+ if(!pN) {
+ MOVI(&fp, 1, pps[0]);
+ } else {
+			if((pps[1]*4)-pAddr) ADDI(&fp, 0, 0, (pps[1] * 4) - pAddr);
+ if(pps[0] - pN) ADDI(&fp, 1, 1, pps[0] - pN);
+ }
+
+ if (p->ws_is[ffts_ctzl(pps[0]/leaf_N)-1]*8 - pLUT) {
+ ADDI(&fp, 2, 2, p->ws_is[ffts_ctzl(pps[0]/leaf_N)-1]*8 - pLUT);
+ }
+
+ if(pps[0] == 2 * leaf_N) {
+ *fp = BL(fp+2, x_4_addr);
+ fp++;
+ } else if(!pps[2]) {
+ //uint32_t *x_8_t_addr = fp;
#ifdef HAVE_NEON
- memcpy(fp, neon_x8_t, neon_ee - neon_x8_t);
- if(sign < 0) {
- fp[31] ^= 0x00200000; fp[32] ^= 0x00200000; fp[33] ^= 0x00200000; fp[34] ^= 0x00200000;
- fp[65] ^= 0x00200000; fp[66] ^= 0x00200000; fp[70] ^= 0x00200000; fp[74] ^= 0x00200000;
- fp[97] ^= 0x00200000; fp[98] ^= 0x00200000; fp[102] ^= 0x00200000; fp[104] ^= 0x00200000;
- }
- fp += (neon_ee - neon_x8_t) / 4;
- //*fp++ = BL(fp+2, x_8_t_addr);
+ memcpy(fp, neon_x8_t, neon_ee - neon_x8_t);
+ if(sign < 0) {
+ fp[31] ^= 0x00200000;
+ fp[32] ^= 0x00200000;
+ fp[33] ^= 0x00200000;
+ fp[34] ^= 0x00200000;
+ fp[65] ^= 0x00200000;
+ fp[66] ^= 0x00200000;
+ fp[70] ^= 0x00200000;
+ fp[74] ^= 0x00200000;
+ fp[97] ^= 0x00200000;
+ fp[98] ^= 0x00200000;
+ fp[102] ^= 0x00200000;
+ fp[104] ^= 0x00200000;
+ }
+ fp += (neon_ee - neon_x8_t) / 4;
+ //*fp++ = BL(fp+2, x_8_t_addr);
#else
- *fp = BL(fp+2, x_8_addr); fp++;
+ *fp = BL(fp+2, x_8_addr);
+ fp++;
#endif
- }else{
- *fp = BL(fp+2, x_8_addr); fp++;
- }
-
- pAddr = pps[1] * 4;
- pN = pps[0];
- pLUT = p->ws_is[__builtin_ctzl(pps[0]/leafN)-1]*8;//LUT_offset(pps[0], leafN);
-// fprintf(stderr, "LUT offset for %d is %d\n", pN, pLUT);
- count += 4;
- pps += 2;
- }
-
- *fp++ = 0xecbd8b10;
- *fp++ = POP_LR(); count++;
+ } else {
+ *fp = BL(fp+2, x_8_addr);
+ fp++;
+ }
+
+ pAddr = pps[1] * 4;
+ pN = pps[0];
+		pLUT = p->ws_is[ffts_ctzl(pps[0]/leaf_N)-1]*8; /* LUT_offset(pps[0], leaf_N) */
+ // fprintf(stderr, "LUT offset for %d is %d\n", pN, pLUT);
+ count += 4;
+ pps += 2;
+ }
+
+	*fp++ = 0xecbd8b10; /* vpop {d8-d15} */
+ *fp++ = POP_LR();
+ count++;
#else
- POP(&fp, R15);
- POP(&fp, R14);
- POP(&fp, R13);
- POP(&fp, R12);
- POP(&fp, R11);
- POP(&fp, R10);
- POP(&fp, RBX);
- POP(&fp, RBP);
- RET(&fp);
-
+ generate_epilogue(&fp);
+#endif
-//uint8_t *pp = func;
-//int counter = 0;
-//do{
-// printf("%02x ", *pp);
-// if(counter++ % 16 == 15) printf("\n");
-//} while(++pp < fp);
+ // *fp++ = B(14); count++;
-//printf("\n");
+ //for(int i=0;i<(neon_x8 - neon_x4)/4;i++)
+ // fprintf(stderr, "%08x\n", x_4_addr[i]);
+ //fprintf(stderr, "\n");
+ //for(int i=0;i<count;i++)
+ //fprintf(stderr, "size of transform %u = %d\n", N, (fp - x_8_addr) * sizeof(*fp));
-#endif
+ free(ps);
+#if defined(_MSC_VER)
+#pragma warning(push)
-// *fp++ = B(14); count++;
-
-//for(int i=0;i<(neon_x8 - neon_x4)/4;i++)
-// fprintf(stderr, "%08x\n", x_4_addr[i]);
-//fprintf(stderr, "\n");
-//for(int i=0;i<count;i++)
-
- free(ps);
-
- if (mprotect(func, p->transform_size, PROT_READ | PROT_EXEC)) {
- perror("Couldn't mprotect");
- exit(1);
- }
-#ifdef __APPLE__
- sys_icache_invalidate(func, p->transform_size);
-#elif __ANDROID__
- cacheflush((long)(func), (long)(func) + p->transform_size, 0);
-#elif __linux__
-#ifdef __GNUC__
- __clear_cache((long)(func), (long)(func) + p->transform_size);
-#endif
+ /* disable type cast warning from data pointer to function pointer */
+#pragma warning(disable : 4055)
#endif
-//fprintf(stderr, "size of transform %zu = %d\n", N, (fp-func)*4);
+ return (transform_func_t) start;
- p->transform = (void *) (start);
-}
+#if defined(_MSC_VER)
+#pragma warning(pop)
+#endif
+}
\ No newline at end of file
diff --git a/lib/ffts/src/codegen.h b/lib/ffts/src/codegen.h
index f592907..e170ca7 100644
--- a/lib/ffts/src/codegen.h
+++ b/lib/ffts/src/codegen.h
@@ -1,10 +1,10 @@
/*
-
+
This file is part of FFTS -- The Fastest Fourier Transform in the South
-
+
Copyright (c) 2012, Anthony M. Blake <amb@anthonix.com>
- Copyright (c) 2012, The University of Waikato
-
+ Copyright (c) 2012, The University of Waikato
+
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -31,19 +31,16 @@
*/
-#ifndef __CODEGEN_H__
-#define __CODEGEN_H__
+#ifndef FFTS_CODEGEN_H
+#define FFTS_CODEGEN_H
-#include <stddef.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <errno.h>
-#include <sys/mman.h>
-#include <string.h>
-#include <limits.h> /* for PAGESIZE */
+#if defined (_MSC_VER) && (_MSC_VER >= 1020)
+#pragma once
+#endif
#include "ffts.h"
+#include "ffts_internal.h"
-void ffts_generate_func_code(ffts_plan_t *, size_t N, size_t leafN, int sign);
+transform_func_t ffts_generate_func_code(ffts_plan_t *p, size_t N, size_t leaf_N, int sign);
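+
+/*
+ * Usage sketch (illustrative, caller-side): the generator now returns the
+ * entry point instead of storing it into the plan itself, so a caller is
+ * expected to do something like
+ *
+ *   p->transform = ffts_generate_func_code(p, N, leaf_N, sign);
+ *   if (!p->transform) {
+ *       // assumed failure convention; handle codegen failure
+ *   }
+ */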
-#endif
+#endif /* FFTS_CODEGEN_H */
diff --git a/lib/ffts/src/codegen_arm.h b/lib/ffts/src/codegen_arm.h
index ad8a9d8..3d146da 100644
--- a/lib/ffts/src/codegen_arm.h
+++ b/lib/ffts/src/codegen_arm.h
@@ -31,10 +31,14 @@
*/
-#ifndef __CODEGEN_ARM_H__
-#define __CODEGEN_ARM_H__
+#ifndef FFTS_CODEGEN_ARM_H
+#define FFTS_CODEGEN_ARM_H
+#include "neon.h"
+#ifdef HAVE_STRING_H
+#include <string.h>
+#endif
uint32_t BL(void *pos, void *target) {
return 0xeb000000 | (((target - pos) / 4) & 0xffffff);
@@ -95,7 +99,130 @@ void MOVI(uint32_t **p, uint8_t dst, uint32_t imm) {
uint32_t PUSH_LR() { return 0xe92d4ff0; } //0xe92d4000; }
uint32_t POP_LR() { return 0xe8bd8ff0; } //0xe8bd8000; }
+static FFTS_INLINE insns_t* generate_size4_base_case(insns_t **fp, int sign)
+{
+ insns_t *x_4_addr;
+ size_t len;
+ x_4_addr = *fp;
+#ifdef HAVE_NEON
+ len = (char*) neon_x8 - (char*) neon_x4;
+ memcpy(x_4_addr, neon_x4, len);
+ if (sign < 0) {
+ x_4_addr[26] ^= 0x00200000;
+ x_4_addr[28] ^= 0x00200000;
+ x_4_addr[31] ^= 0x00200000;
+ x_4_addr[32] ^= 0x00200000;
+ }
+#else
+ len = (char*) vfp_x8 - (char*) vfp_x4;
+ memcpy(x_4_addr, vfp_x4, len);
+
+ if (sign > 0) {
+ x_4_addr[36] ^= 0x00000040;
+ x_4_addr[38] ^= 0x00000040;
+ x_4_addr[43] ^= 0x00000040;
+ x_4_addr[44] ^= 0x00000040;
+ }
#endif
+
+ *fp += len / 4;
+ return x_4_addr;
+}
+
+static FFTS_INLINE insns_t* generate_size8_base_case(insns_t **fp, int sign)
+{
+ insns_t *x_8_addr;
+ ptrdiff_t len;
+
+ x_8_addr = *fp;
+
+#ifdef HAVE_NEON
+ len = (char*) neon_x8_t - (char*) neon_x8;
+ memcpy(x_8_addr, neon_x8, len);
+
+ /*
+	 * Changing adds to subtracts and vice versa allows computation of
+	 * both the IFFT and the FFT.
+ */
+ if (sign < 0) {
+ x_8_addr[31] ^= 0x00200000;
+ x_8_addr[32] ^= 0x00200000;
+ x_8_addr[33] ^= 0x00200000;
+ x_8_addr[34] ^= 0x00200000;
+ x_8_addr[65] ^= 0x00200000;
+ x_8_addr[66] ^= 0x00200000;
+ x_8_addr[70] ^= 0x00200000;
+ x_8_addr[74] ^= 0x00200000;
+ x_8_addr[97] ^= 0x00200000;
+ x_8_addr[98] ^= 0x00200000;
+ x_8_addr[102] ^= 0x00200000;
+ x_8_addr[104] ^= 0x00200000;
+ }
+
+ *fp += len / 4;
+
+ //uint32_t *x_8_t_addr = fp;
+ //memcpy(fp, neon_x8_t, neon_end - neon_x8_t);
+ //fp += (neon_end - neon_x8_t) / 4;
+#else
+ len = (char*) vfp_end - (char*) vfp_x8;
+ memcpy(x_8_addr, vfp_x8, len);
+
+ if (sign > 0) {
+ x_8_addr[65] ^= 0x00000040;
+ x_8_addr[66] ^= 0x00000040;
+ x_8_addr[68] ^= 0x00000040;
+ x_8_addr[70] ^= 0x00000040;
+ x_8_addr[103] ^= 0x00000040;
+ x_8_addr[104] ^= 0x00000040;
+ x_8_addr[105] ^= 0x00000040;
+ x_8_addr[108] ^= 0x00000040;
+ x_8_addr[113] ^= 0x00000040;
+ x_8_addr[114] ^= 0x00000040;
+ x_8_addr[117] ^= 0x00000040;
+ x_8_addr[118] ^= 0x00000040;
+ }
+
+ *fp += len / 4;
+#endif
+
+ return x_8_addr;
+}
+
+static FFTS_INLINE insns_t* generate_prologue(insns_t **fp, ffts_plan_t *p)
+{
+ insns_t *start = *fp;
+
+ *(*fp)++ = PUSH_LR();
+	*(*fp)++ = 0xed2d8b10; /* vpush {d8-d15} */
+
+ ADDI(fp, 3, 1, 0);
+ ADDI(fp, 7, 1, p->N);
+ ADDI(fp, 5, 1, 2 * p->N);
+ ADDI(fp, 10, 7, 2 * p->N);
+ ADDI(fp, 4, 5, 2 * p->N);
+ ADDI(fp, 8, 10, 2 * p->N);
+ ADDI(fp, 6, 4, 2 * p->N);
+ ADDI(fp, 9, 8, 2 * p->N);
+
+ // load offsets into r12
+ *(*fp)++ = LDRI(12, 0, ((uint32_t) &p->offsets) - ((uint32_t) p));
+ // *(*fp)++ = LDRI(1, 0, 4); // load ws into r1
+ ADDI(fp, 1, 0, 0);
+
+	ADDI(fp, 0, 2, 0); // mov out into r0
+ *(*fp)++ = LDRI(2, 1, ((uint32_t) &p->ee_ws) - ((uint32_t) p));
+
+	/* loop counter; identical on the NEON and VFP paths */
+	MOVI(fp, 11, p->i0);
+
+ return start;
+}
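+
+/*
+ * NOTE: following the ADDI chain in generate_prologue() with in = r1 on
+ * entry, the registers end up as eight strided input pointers (N = p->N):
+ *   r3 = in,      r7 = in + N,  r5 = in + 2N, r10 = in + 3N,
+ *   r4 = in + 4N, r8 = in + 5N, r6 = in + 6N, r9  = in + 7N
+ * with the offset table in r12 and the output pointer moved into r0 for
+ * the leaf loops.
+ */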
+
+#endif /* FFTS_CODEGEN_ARM_H */
\ No newline at end of file
diff --git a/lib/ffts/src/codegen_sse.h b/lib/ffts/src/codegen_sse.h
index ec85667..e9819f1 100644
--- a/lib/ffts/src/codegen_sse.h
+++ b/lib/ffts/src/codegen_sse.h
@@ -1,195 +1,1680 @@
/*
-
- This file is part of FFTS -- The Fastest Fourier Transform in the South
-
- Copyright (c) 2012, Anthony M. Blake <amb@anthonix.com>
- Copyright (c) 2012, The University of Waikato
-
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions are met:
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in the
- documentation and/or other materials provided with the distribution.
- * Neither the name of the organization nor the
- names of its contributors may be used to endorse or promote products
- derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
- ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- DISCLAIMED. IN NO EVENT SHALL ANTHONY M. BLAKE BE LIABLE FOR ANY
- DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+This file is part of FFTS -- The Fastest Fourier Transform in the South
+
+Copyright (c) 2012, Anthony M. Blake <amb@anthonix.com>
+Copyright (c) 2012, The University of Waikato
+
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+* Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+* Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in the
+documentation and/or other materials provided with the distribution.
+* Neither the name of the organization nor the
+names of its contributors may be used to endorse or promote products
+derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL ANTHONY M. BLAKE BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
+#ifndef FFTS_CODEGEN_SSE_H
+#define FFTS_CODEGEN_SSE_H
-#ifndef __CODEGEN_SSE_H__
-#define __CODEGEN_SSE_H__
-
-void neon_x4(float *, size_t, float *);
-void neon_x8(float *, size_t, float *);
-void neon_x8_t(float *, size_t, float *);
-void leaf_ee_init();
-void leaf_ee();
-void leaf_oo();
-void leaf_eo();
-void leaf_oe();
-void leaf_end();
-void x_init();
-void x4();
-void x8_soft();
-void x8_hard();
-
-void sse_constants();
-void sse_constants_inv();
-
-// typedef uint8_t insns_t;
-
-extern const uint32_t sse_leaf_ee_offsets[8];
-extern const uint32_t sse_leaf_oo_offsets[8];
-extern const uint32_t sse_leaf_eo_offsets[8];
-extern const uint32_t sse_leaf_oe_offsets[8];
-
-#define EAX 0
-#define ECX 1
-#define EDX 2
-#define EBX 3
-#define ESI 6
-#define EDI 7
-#define EBP 5
-
-#define RAX 0
-#define RCX 1
-#define RDX 2
-#define RBX 3
-#define RSI 6
-#define RDI 7
-#define RBP 5
-#define R8 8
-#define R9 9
-#define R10 10
-#define R11 11
-#define R12 12
-#define R13 13
-#define R14 14
-#define R15 15
-
-void IMM8(uint8_t **p, int32_t imm) {
- *(*p)++ = (imm & 0xff);
-}
+#define TARGET_AMD64
+#include "arch/x64/x64-codegen.h"
-void IMM16(uint8_t **p, int32_t imm) {
- int i;
- for(i=0;i<2;i++) {
- *(*p)++ = (imm & (0xff << (i*8))) >> (i*8);
- }
-}
-void IMM32(uint8_t **p, int32_t imm) {
- int i;
- for(i=0;i<4;i++) {
- *(*p)++ = (imm & (0xff << (i*8))) >> (i*8);
- }
+#include <assert.h>
+
+static const FFTS_ALIGN(16) unsigned int sse_constants[20] = {
+ /* 0.0, -0.0, 0.0, -0.0 */
+ 0x00000000, 0x80000000, 0x00000000, 0x80000000,
+ /* 0.707, 0.707, 0.707, 0.707 */
+ 0x3f3504f3, 0x3f3504f3, 0x3f3504f3, 0x3f3504f3,
+ /* -0.707, 0.707, -0.707, 0.707 */
+ 0xbf3504f3, 0x3f3504f3, 0xbf3504f3, 0x3f3504f3,
+ /* 1.0, 1.0, 0.707, 0.707 */
+ 0x3f800000, 0x3f800000, 0x3f3504f3, 0x3f3504f3,
+ /* 0.0, 0.0, -.707, 0.707 */
+ 0x00000000, 0x00000000, 0xbf3504f3, 0x3f3504f3
+};
+
+static const FFTS_ALIGN(16) unsigned int sse_constants_inv[20] = {
+ /* -0.0, 0.0, -0.0, 0.0 */
+ 0x80000000, 0x00000000, 0x80000000, 0x00000000,
+ /* 0.707, 0.707, 0.707, 0.707 */
+ 0x3f3504f3, 0x3f3504f3, 0x3f3504f3, 0x3f3504f3,
+ /* 0.707, -0.707, 0.707, -0.707 */
+ 0x3f3504f3, 0xbf3504f3, 0x3f3504f3, 0xbf3504f3,
+ /* 1.0, 1.0, 0.707, 0.707 */
+ 0x3f800000, 0x3f800000, 0x3f3504f3, 0x3f3504f3,
+ /* 0.0, 0.0, 0.707, -0.707 */
+ 0x00000000, 0x00000000, 0x3f3504f3, 0xbf3504f3
+};
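+
+/*
+ * NOTE: 0x3f3504f3 is the IEEE-754 single-precision encoding of
+ * 1/sqrt(2) ~= 0.70710677f (= cos(pi/4) = sin(pi/4)), the only nontrivial
+ * twiddle factor the size-4/size-8 base cases need; the 0x80000000 words
+ * are sign masks consumed by the xorps-based sign flips below.
+ */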
+
+#define P(x) (*(*p)++ = x)
+
+static FFTS_INLINE void
+ffts_insert_nops(uint8_t **p, uint32_t count)
+{
+ if (count >= 9) {
+ P(0x66);
+ P(0x0F);
+ P(0x1F);
+ P(0x84);
+ P(0x00);
+ P(0x00);
+ P(0x00);
+ P(0x00);
+ P(0x00);
+
+ if (count > 9) {
+ ffts_insert_nops(p, count - 9);
+ }
+ } else {
+ switch(count) {
+ case 0:
+ break;
+ case 2:
+ P(0x66);
+ /* fall through */
+ case 1:
+ P(0x90);
+ break;
+ case 3:
+ P(0x0F);
+ P(0x1F);
+ P(0x00);
+ break;
+ case 4:
+ P(0x0F);
+ P(0x1F);
+ P(0x40);
+ P(0x00);
+ break;
+ case 5:
+ P(0x0F);
+ P(0x1F);
+ P(0x44);
+ P(0x00);
+ P(0x00);
+ break;
+ case 6:
+ P(0x66);
+ P(0x0F);
+ P(0x1F);
+ P(0x44);
+ P(0x00);
+ P(0x00);
+ break;
+ case 7:
+ P(0x0F);
+ P(0x1F);
+ P(0x80);
+ P(0x00);
+ P(0x00);
+ P(0x00);
+ P(0x00);
+ break;
+ case 8:
+ default:
+ P(0x0F);
+ P(0x1F);
+ P(0x84);
+ P(0x00);
+ P(0x00);
+ P(0x00);
+ P(0x00);
+ P(0x00);
+ break;
+ }
+ }
}
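+
+/*
+ * NOTE: the byte sequences above are the standard x86 multi-byte NOP
+ * encodings (the 0F 1F /0 family, e.g. 0F 1F 00 for 3 bytes and
+ * 66 0F 1F 84 00 00 00 00 00 for 9); emitting one long NOP per gap rather
+ * than a run of single-byte 0x90s keeps the alignment padding cheap to
+ * decode.
+ */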
-void IMM32_NI(uint8_t *p, int32_t imm) {
- int i;
- for(i=0;i<4;i++) {
- *(p+i) = (imm & (0xff << (i*8))) >> (i*8);
- }
+
+static FFTS_INLINE void
+ffts_align_mem16(uint8_t **p, uint32_t offset)
+{
+ int r = (16 - (offset & 0xf)) - (int) ((uintptr_t)(*p) & 0xf);
+ r = (16 + r) & 0xf;
+ ffts_insert_nops(p, r);
}
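+
+/*
+ * NOTE: after ffts_align_mem16(p, offset) returns, (*p + offset) is
+ * 16-byte aligned. generate_leaf_init() calls it with offset 9 on the
+ * System V path so that the hot-loop entry emitted a few setup bytes
+ * later lands on a 16-byte boundary (checked by the assert in
+ * generate_leaf_ee()).
+ */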
-int32_t READ_IMM32(uint8_t *p) {
- int32_t rval = 0;
- int i;
- for(i=0;i<4;i++) {
- rval |= *(p+i) << (i*8);
- }
- return rval;
+static FFTS_INLINE void
+generate_epilogue(insns_t **fp)
+{
+#ifdef _M_X64
+ /* restore nonvolatile registers */
+ x64_mov_reg_membase(*fp, X64_RBX, X64_RSP, -64, 8);
+ x64_mov_reg_membase(*fp, X64_RSI, X64_RSP, -56, 8);
+
+ x64_sse_movaps_reg_membase(*fp, X64_XMM6, X64_RSP, -48);
+ x64_sse_movaps_reg_membase(*fp, X64_XMM7, X64_RSP, -32);
+ x64_sse_movaps_reg_membase(*fp, X64_XMM8, X64_RSP, -16);
+ x64_sse_movaps_reg_membase(*fp, X64_XMM9, X64_RSP, 0);
+ x64_sse_movaps_reg_membase(*fp, X64_XMM10, X64_RSP, 16);
+ x64_sse_movaps_reg_membase(*fp, X64_XMM11, X64_RSP, 32);
+ x64_sse_movaps_reg_membase(*fp, X64_XMM12, X64_RSP, 48);
+ x64_sse_movaps_reg_membase(*fp, X64_XMM13, X64_RSP, 64);
+
+ /* restore the last 2 registers from the shadow space */
+ x64_sse_movaps_reg_membase(*fp, X64_XMM14, X64_RSP, 96);
+ x64_sse_movaps_reg_membase(*fp, X64_XMM15, X64_RSP, 112);
+
+ /* restore stack */
+ x64_alu_reg_imm_size(*fp, X86_ADD, X64_RSP, 88, 8);
+#else
+ x64_pop_reg(*fp, X64_R15);
+ x64_pop_reg(*fp, X64_R14);
+ x64_pop_reg(*fp, X64_R13);
+ x64_pop_reg(*fp, X64_R12);
+ x64_pop_reg(*fp, X64_R11);
+ x64_pop_reg(*fp, X64_R10);
+ x64_pop_reg(*fp, X64_RBX);
+ x64_pop_reg(*fp, X64_RBP);
+#endif
+
+ x64_ret(*fp);
}
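+
+/*
+ * NOTE: the _M_X64 branch targets the Windows x64 ABI, where XMM6-XMM15
+ * are callee-saved and the caller provides a 32-byte shadow space above
+ * the return address (reused here for two of the saves); the #else branch
+ * targets the System V AMD64 ABI, where no XMM registers are callee-saved
+ * and only general-purpose registers need preserving.
+ */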
-void MOVI(uint8_t **p, uint8_t dst, uint32_t imm) {
-// if(imm < 65536) *(*p)++ = 0x66;
- if(dst >= 8) *(*p)++ = 0x41;
-
- //if(imm < 65536 && imm >= 256) *(*p)++ = 0x66;
-
- //if(imm >= 256)
- *(*p)++ = 0xb8 | (dst & 0x7);
-// else *(*p)++ = 0xb0 | (dst & 0x7);
-
- // if(imm < 256) IMM8(p, imm);
-// else
-//if(imm < 65536) IMM16(p, imm);
-//else
- IMM32(p, imm);
-
-//if(dst < 8) {
-// *(*p)++ = 0xb8 + dst;
-//}else{
-// *(*p)++ = 0x49;
-// *(*p)++ = 0xc7;
-// *(*p)++ = 0xc0 | (dst - 8);
-//}
-//IMM32(p, imm);
+static FFTS_INLINE insns_t*
+generate_prologue(insns_t **fp, ffts_plan_t *p)
+{
+ insns_t *start;
+
+ /* unreferenced parameter */
+ (void) p;
+
+ /* align call destination */
+ ffts_align_mem16(fp, 0);
+ start = *fp;
+
+ /* save nonvolatile registers */
+#ifdef _M_X64
+ /* reserve space to save XMM6-XMM15 registers */
+ x64_alu_reg_imm_size(*fp, X86_SUB, X64_RSP, 88, 8);
+
+ x64_mov_membase_reg(*fp, X64_RSP, -64, X64_RBX, 8);
+ x64_mov_membase_reg(*fp, X64_RSP, -56, X64_RSI, 8);
+
+ x64_sse_movaps_membase_reg(*fp, X64_RSP, -48, X64_XMM6);
+ x64_sse_movaps_membase_reg(*fp, X64_RSP, -32, X64_XMM7);
+ x64_sse_movaps_membase_reg(*fp, X64_RSP, -16, X64_XMM8);
+ x64_sse_movaps_membase_reg(*fp, X64_RSP, 0, X64_XMM9);
+ x64_sse_movaps_membase_reg(*fp, X64_RSP, 16, X64_XMM10);
+ x64_sse_movaps_membase_reg(*fp, X64_RSP, 32, X64_XMM11);
+ x64_sse_movaps_membase_reg(*fp, X64_RSP, 48, X64_XMM12);
+ x64_sse_movaps_membase_reg(*fp, X64_RSP, 64, X64_XMM13);
+
+ /* use the shadow space to save last 2 registers */
+ x64_sse_movaps_membase_reg(*fp, X64_RSP, 96, X64_XMM14);
+ x64_sse_movaps_membase_reg(*fp, X64_RSP, 112, X64_XMM15);
+#else
+ x64_push_reg(*fp, X64_RBP);
+ x64_push_reg(*fp, X64_RBX);
+ x64_push_reg(*fp, X64_R10);
+ x64_push_reg(*fp, X64_R11);
+ x64_push_reg(*fp, X64_R12);
+ x64_push_reg(*fp, X64_R13);
+ x64_push_reg(*fp, X64_R14);
+ x64_push_reg(*fp, X64_R15);
+#endif
+
+ return start;
}
-void ADDRMODE(uint8_t **p, uint8_t reg, uint8_t rm, int32_t disp) {
- if(disp == 0) {
- *(*p)++ = (rm & 7) | ((reg & 7) << 3);
- }else if(disp <= 127 || disp >= -128) {
- *(*p)++ = 0x40 | (rm & 7) | ((reg & 7) << 3);
- IMM8(p, disp);
- }else{
- *(*p)++ = 0x80 | (rm & 7) | ((reg & 7) << 3);
- IMM32(p, disp);
- }
+static FFTS_INLINE void
+generate_transform_init(insns_t **fp)
+{
+#ifdef _M_X64
+ /* generate function */
+ x64_sse_movaps_reg_membase(*fp, X64_XMM3, X64_RSI, 0);
+
+ /* set "pointer" to twiddle factors */
+ x64_mov_reg_membase(*fp, X64_R9, X64_RCX, 0x20, 8);
+#else
+ /* generate function */
+ x64_sse_movaps_reg_membase(*fp, X64_XMM3, X64_R9, 0);
+
+ /* set "pointer" to twiddle factors */
+ x64_mov_reg_membase(*fp, X64_R8, X64_RDI, 0x20, 8);
+#endif
}
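+
+/*
+ * NOTE: the plan pointer arrives in the first integer argument register
+ * (RCX on Windows x64, RDI on System V), which is why the two branches
+ * load the twiddle-factor pointer from different base registers.
+ */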
-void LEA(uint8_t **p, uint8_t dst, uint8_t base, int32_t disp) {
+static FFTS_INLINE insns_t*
+generate_size4_base_case(insns_t **fp, int sign)
+{
+ insns_t *ins;
+ insns_t *x4_addr;
+
+ /* unreferenced parameter */
+ (void) sign;
+
+ /* to avoid deferring */
+ ins = *fp;
+
+ /* align call destination */
+ ffts_align_mem16(&ins, 0);
+ x4_addr = ins;
+
+#ifdef _M_X64
+ /* generate function */
+ x64_sse_movaps_reg_membase(ins, X64_XMM0, X64_R8, 64);
+ x64_sse_movaps_reg_membase(ins, X64_XMM1, X64_R8, 96);
+ x64_sse_movaps_reg_membase(ins, X64_XMM7, X64_R8, 0);
+ x64_sse_movaps_reg_membase(ins, X64_XMM4, X64_R9, 0);
+ x64_sse_movaps_reg_reg(ins, X64_XMM9, X64_XMM7);
+ x64_sse_movaps_reg_reg(ins, X64_XMM6, X64_XMM4);
+ x64_sse_movaps_reg_membase(ins, X64_XMM2, X64_R9, 16);
+ x64_sse_mulps_reg_reg(ins, X64_XMM6, X64_XMM0);
+ x64_sse_mulps_reg_reg(ins, X64_XMM4, X64_XMM1);
+ x64_sse_shufps_reg_reg_imm(ins, X64_XMM0, X64_XMM0, 0xB1);
+ x64_sse_shufps_reg_reg_imm(ins, X64_XMM1, X64_XMM1, 0xB1);
+ x64_sse_mulps_reg_reg(ins, X64_XMM0, X64_XMM2);
+ x64_sse_mulps_reg_reg(ins, X64_XMM2, X64_XMM1);
+ x64_sse_subps_reg_reg(ins, X64_XMM6, X64_XMM0);
+ x64_sse_addps_reg_reg(ins, X64_XMM4, X64_XMM2);
+ x64_sse_movaps_reg_reg(ins, X64_XMM5, X64_XMM6);
+ x64_sse_subps_reg_reg(ins, X64_XMM6, X64_XMM4);
+ x64_sse_addps_reg_reg(ins, X64_XMM5, X64_XMM4);
+ x64_sse_movaps_reg_membase(ins, X64_XMM8, X64_R8, 32);
+
+ /* change sign */
+ x64_sse_xorps_reg_reg(ins, X64_XMM6, X64_XMM3);
- *(*p)++ = 0x48 | ((base & 0x8) >> 3) | ((dst & 0x8) >> 1);
- *(*p)++ = 0x8d;
- ADDRMODE(p, dst, base, disp);
+ x64_sse_shufps_reg_reg_imm(ins, X64_XMM6, X64_XMM6, 0xB1);
+ x64_sse_movaps_reg_reg(ins, X64_XMM10, X64_XMM8);
+ x64_sse_movaps_reg_membase(ins, X64_XMM12, X64_R8, 112);
+ x64_sse_subps_reg_reg(ins, X64_XMM9, X64_XMM5);
+ x64_sse_addps_reg_reg(ins, X64_XMM7, X64_XMM5);
+ x64_sse_addps_reg_reg(ins, X64_XMM10, X64_XMM6);
+ x64_sse_subps_reg_reg(ins, X64_XMM8, X64_XMM6);
+
+ x64_sse_movaps_membase_reg(ins, X64_R8, 0, X64_XMM7);
+ x64_sse_movaps_membase_reg(ins, X64_R8, 32, X64_XMM8);
+ x64_sse_movaps_membase_reg(ins, X64_R8, 64, X64_XMM9);
+ x64_sse_movaps_membase_reg(ins, X64_R8, 96, X64_XMM10);
+
+ x64_sse_movaps_reg_membase(ins, X64_XMM14, X64_R9, 32);
+ x64_sse_movaps_reg_membase(ins, X64_XMM11, X64_R8, 80);
+ x64_sse_movaps_reg_reg(ins, X64_XMM0, X64_XMM14);
+ x64_sse_movaps_reg_membase(ins, X64_XMM13, X64_R9, 48);
+ x64_sse_mulps_reg_reg(ins, X64_XMM0, X64_XMM11);
+ x64_sse_mulps_reg_reg(ins, X64_XMM14, X64_XMM12);
+ x64_sse_shufps_reg_reg_imm(ins, X64_XMM11, X64_XMM11, 0xB1);
+ x64_sse_shufps_reg_reg_imm(ins, X64_XMM12, X64_XMM12, 0xB1);
+ x64_sse_mulps_reg_reg(ins, X64_XMM11, X64_XMM13);
+ x64_sse_mulps_reg_reg(ins, X64_XMM13, X64_XMM12);
+ x64_sse_subps_reg_reg(ins, X64_XMM0, X64_XMM11);
+ x64_sse_addps_reg_reg(ins, X64_XMM14, X64_XMM13);
+ x64_sse_movaps_reg_reg(ins, X64_XMM15, X64_XMM0);
+ x64_sse_subps_reg_reg(ins, X64_XMM0, X64_XMM14);
+ x64_sse_addps_reg_reg(ins, X64_XMM15, X64_XMM14);
+
+ /* change sign */
+ x64_sse_xorps_reg_reg(ins, X64_XMM0, X64_XMM3);
+
+ x64_sse_movaps_reg_membase(ins, X64_XMM1, X64_R8, 16);
+ x64_sse_movaps_reg_membase(ins, X64_XMM2, X64_R8, 48);
+ x64_sse_movaps_reg_reg(ins, X64_XMM4, X64_XMM1);
+ x64_sse_shufps_reg_reg_imm(ins, X64_XMM0, X64_XMM0, 0xB1);
+ x64_sse_movaps_reg_reg(ins, X64_XMM5, X64_XMM2);
+ x64_sse_addps_reg_reg(ins, X64_XMM1, X64_XMM15);
+ x64_sse_subps_reg_reg(ins, X64_XMM2, X64_XMM0);
+ x64_sse_subps_reg_reg(ins, X64_XMM4, X64_XMM15);
+ x64_sse_addps_reg_reg(ins, X64_XMM5, X64_XMM0);
+
+ x64_sse_movaps_membase_reg(ins, X64_R8, 16, X64_XMM1);
+ x64_sse_movaps_membase_reg(ins, X64_R8, 48, X64_XMM2);
+ x64_sse_movaps_membase_reg(ins, X64_R8, 80, X64_XMM4);
+ x64_sse_movaps_membase_reg(ins, X64_R8, 112, X64_XMM5);
+
+ x64_ret(ins);
+#else
+ /* generate function */
+ x64_sse_movaps_reg_membase(ins, X64_XMM0, X64_RDX, 64);
+ x64_sse_movaps_reg_membase(ins, X64_XMM1, X64_RDX, 96);
+ x64_sse_movaps_reg_membase(ins, X64_XMM7, X64_RDX, 0);
+ x64_sse_movaps_reg_membase(ins, X64_XMM4, X64_R8, 0);
+ x64_sse_movaps_reg_reg(ins, X64_XMM9, X64_XMM7);
+ x64_sse_movaps_reg_reg(ins, X64_XMM6, X64_XMM4);
+ x64_sse_movaps_reg_membase(ins, X64_XMM2, X64_R8, 16);
+ x64_sse_mulps_reg_reg(ins, X64_XMM6, X64_XMM0);
+ x64_sse_mulps_reg_reg(ins, X64_XMM4, X64_XMM1);
+ x64_sse_shufps_reg_reg_imm(ins, X64_XMM0, X64_XMM0, 0xB1);
+ x64_sse_shufps_reg_reg_imm(ins, X64_XMM1, X64_XMM1, 0xB1);
+ x64_sse_mulps_reg_reg(ins, X64_XMM0, X64_XMM2);
+ x64_sse_mulps_reg_reg(ins, X64_XMM2, X64_XMM1);
+ x64_sse_subps_reg_reg(ins, X64_XMM6, X64_XMM0);
+ x64_sse_addps_reg_reg(ins, X64_XMM4, X64_XMM2);
+ x64_sse_movaps_reg_reg(ins, X64_XMM5, X64_XMM6);
+ x64_sse_subps_reg_reg(ins, X64_XMM6, X64_XMM4);
+ x64_sse_addps_reg_reg(ins, X64_XMM5, X64_XMM4);
+ x64_sse_movaps_reg_membase(ins, X64_XMM8, X64_RDX, 32);
+
+ /* change sign */
+ x64_sse_xorps_reg_reg(ins, X64_XMM6, X64_XMM3);
+
+ x64_sse_shufps_reg_reg_imm(ins, X64_XMM6, X64_XMM6, 0xB1);
+ x64_sse_movaps_reg_reg(ins, X64_XMM10, X64_XMM8);
+ x64_sse_movaps_reg_membase(ins, X64_XMM12, X64_RDX, 112);
+ x64_sse_subps_reg_reg(ins, X64_XMM9, X64_XMM5);
+ x64_sse_addps_reg_reg(ins, X64_XMM7, X64_XMM5);
+ x64_sse_addps_reg_reg(ins, X64_XMM10, X64_XMM6);
+ x64_sse_subps_reg_reg(ins, X64_XMM8, X64_XMM6);
+
+ x64_sse_movaps_membase_reg(ins, X64_RDX, 0, X64_XMM7);
+ x64_sse_movaps_membase_reg(ins, X64_RDX, 32, X64_XMM8);
+ x64_sse_movaps_membase_reg(ins, X64_RDX, 64, X64_XMM9);
+ x64_sse_movaps_membase_reg(ins, X64_RDX, 96, X64_XMM10);
+
+ x64_sse_movaps_reg_membase(ins, X64_XMM14, X64_R8, 32);
+ x64_sse_movaps_reg_membase(ins, X64_XMM11, X64_RDX, 80);
+ x64_sse_movaps_reg_reg(ins, X64_XMM0, X64_XMM14);
+ x64_sse_movaps_reg_membase(ins, X64_XMM13, X64_R8, 48);
+ x64_sse_mulps_reg_reg(ins, X64_XMM0, X64_XMM11);
+ x64_sse_mulps_reg_reg(ins, X64_XMM14, X64_XMM12);
+ x64_sse_shufps_reg_reg_imm(ins, X64_XMM11, X64_XMM11, 0xB1);
+ x64_sse_shufps_reg_reg_imm(ins, X64_XMM12, X64_XMM12, 0xB1);
+ x64_sse_mulps_reg_reg(ins, X64_XMM11, X64_XMM13);
+ x64_sse_mulps_reg_reg(ins, X64_XMM13, X64_XMM12);
+ x64_sse_subps_reg_reg(ins, X64_XMM0, X64_XMM11);
+ x64_sse_addps_reg_reg(ins, X64_XMM14, X64_XMM13);
+ x64_sse_movaps_reg_reg(ins, X64_XMM15, X64_XMM0);
+ x64_sse_subps_reg_reg(ins, X64_XMM0, X64_XMM14);
+ x64_sse_addps_reg_reg(ins, X64_XMM15, X64_XMM14);
+
+ /* change sign */
+ x64_sse_xorps_reg_reg(ins, X64_XMM0, X64_XMM3);
+
+ x64_sse_movaps_reg_membase(ins, X64_XMM1, X64_RDX, 16);
+ x64_sse_movaps_reg_membase(ins, X64_XMM2, X64_RDX, 48);
+ x64_sse_movaps_reg_reg(ins, X64_XMM4, X64_XMM1);
+ x64_sse_shufps_reg_reg_imm(ins, X64_XMM0, X64_XMM0, 0xB1);
+ x64_sse_movaps_reg_reg(ins, X64_XMM5, X64_XMM2);
+ x64_sse_addps_reg_reg(ins, X64_XMM1, X64_XMM15);
+ x64_sse_subps_reg_reg(ins, X64_XMM2, X64_XMM0);
+ x64_sse_subps_reg_reg(ins, X64_XMM4, X64_XMM15);
+ x64_sse_addps_reg_reg(ins, X64_XMM5, X64_XMM0);
+
+ x64_sse_movaps_membase_reg(ins, X64_RDX, 16, X64_XMM1);
+ x64_sse_movaps_membase_reg(ins, X64_RDX, 48, X64_XMM2);
+ x64_sse_movaps_membase_reg(ins, X64_RDX, 80, X64_XMM4);
+ x64_sse_movaps_membase_reg(ins, X64_RDX, 112, X64_XMM5);
+
+ x64_ret(ins);
+#endif
+
+ *fp = ins;
+ return x4_addr;
}
-void RET(uint8_t **p) {
- *(*p)++ = 0xc3;
+static FFTS_INLINE void
+generate_leaf_init(insns_t **fp, uint32_t loop_count)
+{
+ /* to avoid deferring */
+ insns_t *ins = *fp;
+
+#ifdef _M_X64
+ /* set loop counter */
+ x86_mov_reg_imm(ins, X86_EBX, loop_count);
+
+ /* generate function */
+
+ /* clear */
+ x86_clear_reg(ins, X86_EAX);
+
+ /* set "pointer" to offsets */
+ x64_mov_reg_membase(ins, X64_R9, X64_RCX,
+ offsetof(struct _ffts_plan_t, offsets), 8);
+
+ /* set "pointer" to constants */
+ x64_mov_reg_membase(ins, X64_RSI, X64_RCX,
+ offsetof(struct _ffts_plan_t, constants), 8);
+
+ /* use XMM3 for sign change */
+ x64_sse_movaps_reg_membase(ins, X64_XMM3, X64_RSI, 0);
+#else
+ /* set loop counter */
+ x86_mov_reg_imm(ins, X86_ECX, loop_count);
+
+ /* generate function */
+
+ /* clear */
+ x86_clear_reg(ins, X86_EAX);
+
+ /* set "pointer" to offsets */
+ x64_mov_reg_membase(ins, X64_R8, X64_RDI,
+ offsetof(struct _ffts_plan_t, offsets), 8);
+
+ /* set "pointer" to constants */
+ x64_mov_reg_membase(ins, X64_R9, X64_RDI,
+ offsetof(struct _ffts_plan_t, constants), 8);
+
+ /* align loop/jump destination */
+ ffts_align_mem16(&ins, 9);
+#endif
+
+ *fp = ins;
}
-void ADDI(uint8_t **p, uint8_t dst, int32_t imm) {
-
- if(dst >= 8) *(*p)++ = 0x49;
- else *(*p)++ = 0x48;
-
- if(imm > 127 || imm <= -128) *(*p)++ = 0x81;
- else *(*p)++ = 0x83;
-
- *(*p)++ = 0xc0 | (dst & 0x7);
-
- if(imm > 127 || imm <= -128) IMM32(p, imm);
- else IMM8(p, imm);
+static FFTS_INLINE void
+generate_leaf_ee(insns_t **fp, uint32_t *offsets, int extend)
+{
+ insns_t *leaf_ee_loop;
+
+ /* to avoid deferring */
+ insns_t *ins = *fp;
+
+#ifdef _M_X64
+ x64_sse_movaps_reg_membase_size(ins, X64_XMM0, X64_RSI, 32, 1);
+
+ /* beginning of the loop (make sure it's 16 byte aligned) */
+ leaf_ee_loop = ins;
+ assert(!(((uintptr_t) leaf_ee_loop) & 0xF));
+
+ x64_sse_movaps_reg_memindex(ins, X64_XMM7, X64_RDX, offsets[0], X64_RAX, 2);
+ x64_sse_movaps_reg_memindex(ins, X64_XMM12, X64_RDX, offsets[2], X64_RAX, 2);
+
+ x64_sse_movaps_reg_reg_size(ins, X64_XMM6, X64_XMM7, extend > 0);
+ extend--;
+
+ x64_sse_movaps_reg_memindex(ins, X64_XMM10, X64_RDX, offsets[3], X64_RAX, 2);
+ x64_sse_movaps_reg_reg(ins, X64_XMM11, X64_XMM12);
+ x64_sse_subps_reg_reg(ins, X64_XMM12, X64_XMM10);
+ x64_sse_addps_reg_reg(ins, X64_XMM11, X64_XMM10);
+
+ /* change sign */
+ x64_sse_xorps_reg_reg(ins, X64_XMM12, X64_XMM3);
+
+ x64_sse_movaps_reg_memindex(ins, X64_XMM9, X64_RDX, offsets[1], X64_RAX, 2);
+ x64_sse_movaps_reg_memindex(ins, X64_XMM10, X64_RDX, offsets[4], X64_RAX, 2);
+ x64_sse_addps_reg_reg(ins, X64_XMM6, X64_XMM9);
+ x64_sse_subps_reg_reg(ins, X64_XMM7, X64_XMM9);
+ x64_sse_movaps_reg_memindex(ins, X64_XMM13, X64_RDX, offsets[5], X64_RAX, 2);
+ x64_sse_movaps_reg_reg(ins, X64_XMM9, X64_XMM10);
+ x64_sse_movaps_reg_memindex(ins, X64_XMM8, X64_RDX, offsets[6], X64_RAX, 2);
+
+ x64_sse_movaps_reg_reg_size(ins, X64_XMM5, X64_XMM6, extend > 0);
+ extend--;
+
+ x64_sse_movaps_reg_memindex(ins, X64_XMM14, X64_RDX, offsets[7], X64_RAX, 2);
+ x64_sse_movaps_reg_reg(ins, X64_XMM15, X64_XMM8);
+ x64_sse_shufps_reg_reg_imm(ins, X64_XMM12, X64_XMM12, 0xB1);
+
+ x64_sse_movaps_reg_reg_size(ins, X64_XMM4, X64_XMM7, extend > 0);
+ extend--;
+
+ x64_movsxd_reg_memindex(ins, X64_R10, X64_R9, 0, X64_RAX, 2);
+ x64_sse_subps_reg_reg(ins, X64_XMM10, X64_XMM13);
+ x64_sse_subps_reg_reg(ins, X64_XMM8, X64_XMM14);
+ x64_sse_addps_reg_reg(ins, X64_XMM5, X64_XMM11);
+ x64_sse_subps_reg_reg(ins, X64_XMM6, X64_XMM11);
+ x64_sse_subps_reg_reg(ins, X64_XMM4, X64_XMM12);
+ x64_sse_addps_reg_reg(ins, X64_XMM7, X64_XMM12);
+ x64_sse_addps_reg_reg(ins, X64_XMM9, X64_XMM13);
+ x64_sse_addps_reg_reg(ins, X64_XMM15, X64_XMM14);
+ x64_sse_movaps_reg_membase(ins, X64_XMM12, X64_RSI, 16);
+ x64_sse_movaps_reg_reg(ins, X64_XMM1, X64_XMM9);
+ x64_sse_movaps_reg_reg(ins, X64_XMM11, X64_XMM12);
+
+ x64_sse_movaps_reg_reg_size(ins, X64_XMM2, X64_XMM5, extend > 0);
+ extend--;
+
+ x64_sse_mulps_reg_reg(ins, X64_XMM12, X64_XMM10);
+ x64_sse_subps_reg_reg(ins, X64_XMM9, X64_XMM15);
+ x64_sse_addps_reg_reg(ins, X64_XMM1, X64_XMM15);
+ x64_sse_mulps_reg_reg(ins, X64_XMM11, X64_XMM8);
+
+ x64_sse_addps_reg_reg_size(ins, X64_XMM2, X64_XMM1, extend > 0);
+ extend--;
+
+ x64_sse_subps_reg_reg_size(ins, X64_XMM5, X64_XMM1, extend > 0);
+ extend--;
+
+ x64_sse_shufps_reg_reg_imm(ins, X64_XMM10, X64_XMM10, 0xB1);
+
+ /* change sign */
+ x64_sse_xorps_reg_reg(ins, X64_XMM9, X64_XMM3);
+
+ x64_sse_shufps_reg_reg_imm(ins, X64_XMM8, X64_XMM8, 0xB1);
+
+ x64_sse_movaps_reg_reg_size(ins, X64_XMM1, X64_XMM6, extend > 0);
+ extend--;
+
+ x64_sse_mulps_reg_reg(ins, X64_XMM10, X64_XMM0);
+ x64_sse_movaps_reg_reg(ins, X64_XMM13, X64_XMM4);
+ x64_sse_mulps_reg_reg(ins, X64_XMM8, X64_XMM0);
+ x64_sse_subps_reg_reg(ins, X64_XMM12, X64_XMM10);
+ x64_sse_addps_reg_reg(ins, X64_XMM11, X64_XMM8);
+ x64_sse_movaps_reg_reg(ins, X64_XMM8, X64_XMM12);
+ x64_sse_movaps_reg_reg(ins, X64_XMM14, X64_XMM7);
+ x64_sse_shufps_reg_reg_imm(ins, X64_XMM9, X64_XMM9, 0xB1);
+ x64_sse_subps_reg_reg(ins, X64_XMM12, X64_XMM11);
+ x64_sse_addps_reg_reg(ins, X64_XMM8, X64_XMM11);
+ x64_sse_subps_reg_reg(ins, X64_XMM1, X64_XMM9);
+ x64_sse_addps_reg_reg(ins, X64_XMM6, X64_XMM9);
+ x64_sse_addps_reg_reg(ins, X64_XMM4, X64_XMM8);
+ x64_sse_subps_reg_reg(ins, X64_XMM13, X64_XMM8);
+
+ /* change sign */
+ x64_sse_xorps_reg_reg(ins, X64_XMM12, X64_XMM3);
+
+ x64_sse_movaps_reg_reg(ins, X64_XMM8, X64_XMM2);
+ x64_sse_shufps_reg_reg_imm(ins, X64_XMM12, X64_XMM12, 0xB1);
+ x64_sse_movaps_reg_reg(ins, X64_XMM9, X64_XMM6);
+ x64_movsxd_reg_memindex(ins, X64_R11, X64_R9, 8, X64_RAX, 2);
+ x64_sse_movlhps_reg_reg(ins, X64_XMM8, X64_XMM4);
+ x64_alu_reg_imm_size(ins, X86_ADD, X64_RAX, 4, 8);
+ x64_sse_shufps_reg_reg_imm(ins, X64_XMM2, X64_XMM4, 0xEE);
+
+ x64_sse_movaps_reg_reg_size(ins, X64_XMM4, X64_XMM1, extend > 0);
+ extend--;
+
+ x64_sse_subps_reg_reg(ins, X64_XMM7, X64_XMM12);
+ x64_sse_addps_reg_reg(ins, X64_XMM14, X64_XMM12);
+ x64_sse_movlhps_reg_reg(ins, X64_XMM4, X64_XMM7);
+ x64_sse_shufps_reg_reg_imm(ins, X64_XMM1, X64_XMM7, 0xEE);
+
+ x64_sse_movaps_reg_reg_size(ins, X64_XMM7, X64_XMM5, extend > 0);
+ extend--;
+
+ x64_sse_movlhps_reg_reg(ins, X64_XMM7, X64_XMM13);
+ x64_sse_movlhps_reg_reg(ins, X64_XMM9, X64_XMM14);
+ x64_sse_shufps_reg_reg_imm(ins, X64_XMM5, X64_XMM13, 0xEE);
+ x64_sse_shufps_reg_reg_imm(ins, X64_XMM6, X64_XMM14, 0xEE);
+ x64_sse_movaps_memindex_reg(ins, X64_R8, 0, X64_R10, 2, X64_XMM8);
+ x64_sse_movaps_memindex_reg(ins, X64_R8, 16, X64_R10, 2, X64_XMM4);
+ x64_sse_movaps_memindex_reg(ins, X64_R8, 32, X64_R10, 2, X64_XMM7);
+ x64_sse_movaps_memindex_reg(ins, X64_R8, 48, X64_R10, 2, X64_XMM9);
+ x64_sse_movaps_memindex_reg(ins, X64_R8, 0, X64_R11, 2, X64_XMM2);
+ x64_sse_movaps_memindex_reg(ins, X64_R8, 16, X64_R11, 2, X64_XMM1);
+ x64_sse_movaps_memindex_reg(ins, X64_R8, 32, X64_R11, 2, X64_XMM5);
+ x64_sse_movaps_memindex_reg(ins, X64_R8, 48, X64_R11, 2, X64_XMM6);
+
+ /* loop condition */
+ x64_alu_reg_reg_size(ins, X86_CMP, X64_RBX, X64_RAX, 8);
+ x64_branch_size(ins, X86_CC_NE, leaf_ee_loop, 0, 4);
+#else
+ x64_sse_movaps_reg_membase_size(ins, X64_XMM0, X64_R9, 32, 1);
+
+ /* use XMM8 for sign change */
+ x64_sse_movaps_reg_membase(ins, X64_XMM8, X64_R9, 0);
+
+ /* beginning of the loop (make sure it's 16 byte aligned) */
+ leaf_ee_loop = ins;
+ assert(!(((uintptr_t) leaf_ee_loop) & 0xF));
+
+ x64_sse_movaps_reg_memindex(ins, X64_XMM7, X64_RSI, offsets[0], X64_RAX, 2);
+ x64_sse_movaps_reg_memindex(ins, X64_XMM12, X64_RSI, offsets[2], X64_RAX, 2);
+
+ x64_sse_movaps_reg_reg_size(ins, X64_XMM6, X64_XMM7, extend > 0);
+ extend--;
+
+ x64_sse_movaps_reg_memindex(ins, X64_XMM10, X64_RSI, offsets[3], X64_RAX, 2);
+ x64_sse_movaps_reg_reg(ins, X64_XMM11, X64_XMM12);
+ x64_sse_subps_reg_reg(ins, X64_XMM12, X64_XMM10);
+ x64_sse_addps_reg_reg(ins, X64_XMM11, X64_XMM10);
+
+ /* change sign */
+ x64_sse_xorps_reg_reg(ins, X64_XMM12, X64_XMM8);
+
+ x64_sse_movaps_reg_memindex(ins, X64_XMM9, X64_RSI, offsets[1], X64_RAX, 2);
+ x64_sse_movaps_reg_memindex(ins, X64_XMM10, X64_RSI, offsets[4], X64_RAX, 2);
+ x64_sse_addps_reg_reg(ins, X64_XMM6, X64_XMM9);
+ x64_sse_subps_reg_reg(ins, X64_XMM7, X64_XMM9);
+
+ x64_sse_movaps_reg_memindex(ins, X64_XMM13, X64_RSI, offsets[5], X64_RAX, 2);
+ x64_sse_movaps_reg_reg(ins, X64_XMM9, X64_XMM10);
+ x64_sse_movaps_reg_memindex(ins, X64_XMM3, X64_RSI, offsets[6], X64_RAX, 2);
+
+ x64_sse_movaps_reg_reg_size(ins, X64_XMM5, X64_XMM6, extend > 0);
+ extend--;
+
+ x64_sse_movaps_reg_memindex(ins, X64_XMM14, X64_RSI, offsets[7], X64_RAX, 2);
+ x64_sse_movaps_reg_reg(ins, X64_XMM15, X64_XMM3);
+ x64_sse_shufps_reg_reg_imm(ins, X64_XMM12, X64_XMM12, 0xB1);
+
+ x64_sse_movaps_reg_reg_size(ins, X64_XMM4, X64_XMM7, extend > 0);
+ extend--;
+
+ x64_movsxd_reg_memindex(ins, X64_R11, X64_R8, 0, X64_RAX, 2);
+ x64_sse_subps_reg_reg(ins, X64_XMM10, X64_XMM13);
+ x64_sse_subps_reg_reg(ins, X64_XMM3, X64_XMM14);
+ x64_sse_addps_reg_reg(ins, X64_XMM5, X64_XMM11);
+ x64_sse_subps_reg_reg(ins, X64_XMM6, X64_XMM11);
+ x64_sse_subps_reg_reg(ins, X64_XMM4, X64_XMM12);
+ x64_sse_addps_reg_reg(ins, X64_XMM7, X64_XMM12);
+ x64_sse_addps_reg_reg(ins, X64_XMM9, X64_XMM13);
+ x64_sse_addps_reg_reg(ins, X64_XMM15, X64_XMM14);
+ x64_sse_movaps_reg_membase(ins, X64_XMM12, X64_R9, 16);
+ x64_sse_movaps_reg_reg(ins, X64_XMM1, X64_XMM9);
+ x64_sse_movaps_reg_reg(ins, X64_XMM11, X64_XMM12);
+
+ x64_sse_movaps_reg_reg_size(ins, X64_XMM2, X64_XMM5, extend > 0);
+ extend--;
+
+ x64_sse_mulps_reg_reg(ins, X64_XMM12, X64_XMM10);
+ x64_sse_subps_reg_reg(ins, X64_XMM9, X64_XMM15);
+ x64_sse_addps_reg_reg(ins, X64_XMM1, X64_XMM15);
+ x64_sse_mulps_reg_reg(ins, X64_XMM11, X64_XMM3);
+
+ x64_sse_addps_reg_reg_size(ins, X64_XMM2, X64_XMM1, extend > 0);
+ extend--;
+
+ x64_sse_subps_reg_reg_size(ins, X64_XMM5, X64_XMM1, extend > 0);
+ extend--;
+
+ x64_sse_shufps_reg_reg_imm(ins, X64_XMM10, X64_XMM10, 0xB1);
+
+ /* change sign */
+ x64_sse_xorps_reg_reg(ins, X64_XMM9, X64_XMM8);
+
+ x64_sse_shufps_reg_reg_imm(ins, X64_XMM3, X64_XMM3, 0xB1);
+
+ x64_sse_movaps_reg_reg_size(ins, X64_XMM1, X64_XMM6, extend > 0);
+ extend--;
+
+ x64_sse_mulps_reg_reg(ins, X64_XMM10, X64_XMM0);
+ x64_sse_movaps_reg_reg(ins, X64_XMM13, X64_XMM4);
+ x64_sse_mulps_reg_reg(ins, X64_XMM3, X64_XMM0);
+ x64_sse_subps_reg_reg(ins, X64_XMM12, X64_XMM10);
+ x64_sse_addps_reg_reg(ins, X64_XMM11, X64_XMM3);
+ x64_sse_movaps_reg_reg(ins, X64_XMM3, X64_XMM12);
+ x64_sse_movaps_reg_reg(ins, X64_XMM14, X64_XMM7);
+ x64_sse_shufps_reg_reg_imm(ins, X64_XMM9, X64_XMM9, 0xB1);
+ x64_sse_subps_reg_reg(ins, X64_XMM12, X64_XMM11);
+ x64_sse_addps_reg_reg(ins, X64_XMM3, X64_XMM11);
+ x64_sse_subps_reg_reg(ins, X64_XMM1, X64_XMM9);
+ x64_sse_addps_reg_reg(ins, X64_XMM6, X64_XMM9);
+ x64_sse_addps_reg_reg(ins, X64_XMM4, X64_XMM3);
+ x64_sse_subps_reg_reg(ins, X64_XMM13, X64_XMM3);
+
+ /* change sign */
+ x64_sse_xorps_reg_reg(ins, X64_XMM12, X64_XMM8);
+
+ x64_sse_movaps_reg_reg(ins, X64_XMM3, X64_XMM2);
+ x64_sse_shufps_reg_reg_imm(ins, X64_XMM12, X64_XMM12, 0xB1);
+ x64_sse_movaps_reg_reg(ins, X64_XMM9, X64_XMM6);
+ x64_movsxd_reg_memindex(ins, X64_R12, X64_R8, 8, X64_RAX, 2);
+ x64_sse_movlhps_reg_reg(ins, X64_XMM3, X64_XMM4);
+ x64_alu_reg_imm_size(ins, X86_ADD, X64_RAX, 4, 8);
+ x64_sse_shufps_reg_reg_imm(ins, X64_XMM2, X64_XMM4, 0xEE);
+
+ x64_sse_movaps_reg_reg_size(ins, X64_XMM4, X64_XMM1, extend > 0);
+ extend--;
+
+ x64_sse_subps_reg_reg(ins, X64_XMM7, X64_XMM12);
+ x64_sse_addps_reg_reg(ins, X64_XMM14, X64_XMM12);
+ x64_sse_movlhps_reg_reg(ins, X64_XMM4, X64_XMM7);
+ x64_sse_shufps_reg_reg_imm(ins, X64_XMM1, X64_XMM7, 0xEE);
+
+ x64_sse_movaps_reg_reg_size(ins, X64_XMM7, X64_XMM5, extend > 0);
+ extend--;
+
+ x64_sse_movlhps_reg_reg(ins, X64_XMM7, X64_XMM13);
+ x64_sse_movlhps_reg_reg(ins, X64_XMM9, X64_XMM14);
+ x64_sse_shufps_reg_reg_imm(ins, X64_XMM5, X64_XMM13, 0xEE);
+ x64_sse_shufps_reg_reg_imm(ins, X64_XMM6, X64_XMM14, 0xEE);
+ x64_sse_movaps_memindex_reg(ins, X64_RDX, 0, X64_R11, 2, X64_XMM3);
+ x64_sse_movaps_memindex_reg(ins, X64_RDX, 16, X64_R11, 2, X64_XMM4);
+ x64_sse_movaps_memindex_reg(ins, X64_RDX, 32, X64_R11, 2, X64_XMM7);
+ x64_sse_movaps_memindex_reg(ins, X64_RDX, 48, X64_R11, 2, X64_XMM9);
+ x64_sse_movaps_memindex_reg(ins, X64_RDX, 0, X64_R12, 2, X64_XMM2);
+ x64_sse_movaps_memindex_reg(ins, X64_RDX, 16, X64_R12, 2, X64_XMM1);
+ x64_sse_movaps_memindex_reg(ins, X64_RDX, 32, X64_R12, 2, X64_XMM5);
+ x64_sse_movaps_memindex_reg(ins, X64_RDX, 48, X64_R12, 2, X64_XMM6);
+
+ /* loop condition */
+ x64_alu_reg_reg_size(ins, X86_CMP, X64_RCX, X64_RAX, 8);
+ x64_branch_size(ins, X86_CC_NE, leaf_ee_loop, 0, 4);
+#endif
+
+ *fp = ins;
}
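+
+/*
+ * NOTE (interpretation): the `extend` argument counts how many of the
+ * *_size emitter calls above should use their longer encodings (the
+ * `extend > 0` flag); this apparently lets the caller stretch the loop
+ * body by a few bytes so that the code following the leaf stays aligned.
+ */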
-void CALL(uint8_t **p, uint8_t *func) {
- *(*p)++ = 0xe8;
- IMM32(p, ((void *)func) - (void *)(*p) - 4);
+static FFTS_INLINE void
+generate_leaf_eo(insns_t **fp, uint32_t *offsets)
+{
+ /* to avoid deferring */
+ insns_t *ins = *fp;
+
+#ifdef _M_X64
+ x64_sse_movaps_reg_memindex(ins, X64_XMM9, X64_RDX, offsets[0], X64_RAX, 2);
+ x64_sse_movaps_reg_memindex(ins, X64_XMM7, X64_RDX, offsets[2], X64_RAX, 2);
+ x64_sse_movaps_reg_reg(ins, X64_XMM11, X64_XMM9);
+ x64_sse_movaps_reg_memindex(ins, X64_XMM5, X64_RDX, offsets[3], X64_RAX, 2);
+ x64_sse_movaps_reg_reg(ins, X64_XMM6, X64_XMM7);
+ x64_sse_movaps_reg_memindex(ins, X64_XMM4, X64_RDX, offsets[1], X64_RAX, 2);
+ x64_sse_subps_reg_reg(ins, X64_XMM7, X64_XMM5);
+ x64_sse_addps_reg_reg(ins, X64_XMM11, X64_XMM4);
+ x64_sse_subps_reg_reg(ins, X64_XMM9, X64_XMM4);
+ x64_sse_addps_reg_reg(ins, X64_XMM6, X64_XMM5);
+ x64_sse_movaps_reg_reg(ins, X64_XMM10, X64_XMM11);
+
+ /* change sign */
+ x64_sse_xorps_reg_reg(ins, X64_XMM7, X64_XMM3);
+
+ x64_sse_movaps_reg_reg(ins, X64_XMM8, X64_XMM9);
+ x64_sse_shufps_reg_reg_imm(ins, X64_XMM7, X64_XMM7, 0xB1);
+ x64_sse_addps_reg_reg(ins, X64_XMM10, X64_XMM6);
+ x64_sse_subps_reg_reg(ins, X64_XMM11, X64_XMM6);
+ x64_sse_subps_reg_reg(ins, X64_XMM8, X64_XMM7);
+ x64_sse_addps_reg_reg(ins, X64_XMM9, X64_XMM7);
+ x64_movsxd_reg_memindex(ins, X64_R11, X64_R9, 8, X64_RAX, 2);
+ x64_sse_movaps_reg_reg(ins, X64_XMM2, X64_XMM10);
+ x64_movsxd_reg_memindex(ins, X64_R10, X64_R9, 0, X64_RAX, 2);
+ x64_sse_movaps_reg_reg(ins, X64_XMM1, X64_XMM11);
+ x64_sse_shufps_reg_reg_imm(ins, X64_XMM10, X64_XMM8, 0xEE);
+ x64_sse_shufps_reg_reg_imm(ins, X64_XMM11, X64_XMM9, 0xEE);
+ x64_sse_movaps_memindex_reg(ins, X64_R8, 0, X64_R11, 2, X64_XMM10);
+ x64_sse_movaps_memindex_reg(ins, X64_R8, 16, X64_R11, 2, X64_XMM11);
+ x64_sse_movaps_reg_memindex(ins, X64_XMM15, X64_RDX, offsets[4], X64_RAX, 2);
+ x64_sse_movaps_reg_memindex(ins, X64_XMM12, X64_RDX, offsets[5], X64_RAX, 2);
+ x64_sse_movaps_reg_reg(ins, X64_XMM14, X64_XMM15);
+ x64_sse_movaps_reg_memindex(ins, X64_XMM4, X64_RDX, offsets[6], X64_RAX, 2);
+ x64_sse_addps_reg_reg(ins, X64_XMM14, X64_XMM12);
+ x64_sse_subps_reg_reg(ins, X64_XMM15, X64_XMM12);
+ x64_sse_movaps_reg_memindex(ins, X64_XMM13, X64_RDX, offsets[7], X64_RAX, 2);
+ x64_sse_movaps_reg_reg(ins, X64_XMM5, X64_XMM4);
+ x64_sse_movaps_reg_reg(ins, X64_XMM7, X64_XMM14);
+ x64_sse_addps_reg_reg(ins, X64_XMM5, X64_XMM13);
+ x64_sse_subps_reg_reg(ins, X64_XMM4, X64_XMM13);
+ x64_sse_movlhps_reg_reg(ins, X64_XMM2, X64_XMM8);
+ x64_sse_movaps_reg_reg(ins, X64_XMM8, X64_XMM5);
+ x64_sse_movlhps_reg_reg(ins, X64_XMM7, X64_XMM15);
+
+ /* change sign */
+ x64_sse_xorps_reg_reg(ins, X64_XMM15, X64_XMM3);
+
+ x64_sse_movaps_reg_reg(ins, X64_XMM6, X64_XMM5);
+ x64_sse_subps_reg_reg(ins, X64_XMM5, X64_XMM14);
+ x64_sse_addps_reg_reg(ins, X64_XMM6, X64_XMM14);
+ x64_sse_movlhps_reg_reg(ins, X64_XMM1, X64_XMM9);
+ x64_sse_movaps_reg_reg(ins, X64_XMM14, X64_XMM4);
+ x64_sse_movlhps_reg_reg(ins, X64_XMM8, X64_XMM4);
+ x64_sse_movaps_reg_reg(ins, X64_XMM12, X64_XMM1);
+ x64_sse_shufps_reg_reg_imm(ins, X64_XMM15, X64_XMM15, 0xB1);
+ x64_sse_movaps_reg_membase(ins, X64_XMM11, X64_RSI, 48);
+ x64_alu_reg_imm_size(ins, X86_ADD, X64_RAX, 4, 8);
+ x64_sse_subps_reg_reg(ins, X64_XMM14, X64_XMM15);
+ x64_sse_movaps_reg_reg(ins, X64_XMM9, X64_XMM11);
+ x64_sse_mulps_reg_reg(ins, X64_XMM11, X64_XMM7);
+ x64_sse_addps_reg_reg(ins, X64_XMM4, X64_XMM15);
+ x64_sse_movaps_reg_membase(ins, X64_XMM15, X64_RSI, 64);
+ x64_sse_shufps_reg_reg_imm(ins, X64_XMM7, X64_XMM7, 0xB1);
+ x64_sse_mulps_reg_reg(ins, X64_XMM9, X64_XMM8);
+ x64_sse_mulps_reg_reg(ins, X64_XMM7, X64_XMM15);
+ x64_sse_shufps_reg_reg_imm(ins, X64_XMM8, X64_XMM8, 0xB1);
+ x64_sse_subps_reg_reg(ins, X64_XMM11, X64_XMM7);
+ x64_sse_mulps_reg_reg(ins, X64_XMM8, X64_XMM15);
+ x64_sse_movaps_reg_reg(ins, X64_XMM10, X64_XMM11);
+ x64_sse_addps_reg_reg(ins, X64_XMM9, X64_XMM8);
+ x64_sse_shufps_reg_reg_imm(ins, X64_XMM6, X64_XMM14, 0xEE);
+ x64_sse_subps_reg_reg(ins, X64_XMM11, X64_XMM9);
+ x64_sse_addps_reg_reg(ins, X64_XMM10, X64_XMM9);
+
+ /* change sign */
+ x64_sse_xorps_reg_reg(ins, X64_XMM11, X64_XMM3);
+
+ x64_sse_movaps_reg_reg(ins, X64_XMM0, X64_XMM2);
+ x64_sse_shufps_reg_reg_imm(ins, X64_XMM11, X64_XMM11, 0xB1);
+ x64_sse_subps_reg_reg(ins, X64_XMM0, X64_XMM10);
+ x64_sse_addps_reg_reg(ins, X64_XMM2, X64_XMM10);
+ x64_sse_addps_reg_reg(ins, X64_XMM12, X64_XMM11);
+ x64_sse_subps_reg_reg(ins, X64_XMM1, X64_XMM11);
+ x64_sse_shufps_reg_reg_imm(ins, X64_XMM5, X64_XMM4, 0xEE);
+ x64_sse_movaps_memindex_reg(ins, X64_R8, 48, X64_R11, 2, X64_XMM5);
+ x64_sse_movaps_memindex_reg(ins, X64_R8, 32, X64_R11, 2, X64_XMM6);
+ x64_sse_movaps_memindex_reg(ins, X64_R8, 0, X64_R10, 2, X64_XMM2);
+ x64_sse_movaps_memindex_reg(ins, X64_R8, 16, X64_R10, 2, X64_XMM1);
+ x64_sse_movaps_memindex_reg(ins, X64_R8, 32, X64_R10, 2, X64_XMM0);
+ x64_sse_movaps_memindex_reg(ins, X64_R8, 48, X64_R10, 2, X64_XMM12);
+#else
+ x64_sse_movaps_reg_memindex(ins, X64_XMM9, X64_RSI, offsets[0], X64_RAX, 2);
+ x64_sse_movaps_reg_memindex(ins, X64_XMM7, X64_RSI, offsets[2], X64_RAX, 2);
+ x64_sse_movaps_reg_reg(ins, X64_XMM11, X64_XMM9);
+ x64_sse_movaps_reg_memindex(ins, X64_XMM5, X64_RSI, offsets[3], X64_RAX, 2);
+ x64_sse_movaps_reg_reg(ins, X64_XMM6, X64_XMM7);
+ x64_sse_movaps_reg_memindex(ins, X64_XMM4, X64_RSI, offsets[1], X64_RAX, 2);
+ x64_sse_subps_reg_reg(ins, X64_XMM7, X64_XMM5);
+ x64_sse_addps_reg_reg(ins, X64_XMM11, X64_XMM4);
+ x64_sse_subps_reg_reg(ins, X64_XMM9, X64_XMM4);
+ x64_sse_addps_reg_reg(ins, X64_XMM6, X64_XMM5);
+ x64_sse_movaps_reg_membase(ins, X64_XMM3, X64_R9, 0);
+ x64_sse_movaps_reg_reg(ins, X64_XMM10, X64_XMM11);
+
+ /* change sign */
+ x64_sse_xorps_reg_reg(ins, X64_XMM7, X64_XMM3);
+
+ x64_sse_movaps_reg_reg(ins, X64_XMM8, X64_XMM9);
+ x64_sse_shufps_reg_reg_imm(ins, X64_XMM7, X64_XMM7, 0xB1);
+ x64_sse_addps_reg_reg(ins, X64_XMM10, X64_XMM6);
+ x64_sse_subps_reg_reg(ins, X64_XMM11, X64_XMM6);
+ x64_sse_subps_reg_reg(ins, X64_XMM8, X64_XMM7);
+ x64_sse_addps_reg_reg(ins, X64_XMM9, X64_XMM7);
+ x64_movsxd_reg_memindex(ins, X64_R12, X64_R8, 8, X64_RAX, 2);
+ x64_sse_movaps_reg_reg(ins, X64_XMM2, X64_XMM10);
+ x64_movsxd_reg_memindex(ins, X64_R11, X64_R8, 0, X64_RAX, 2);
+ x64_sse_movaps_reg_reg(ins, X64_XMM1, X64_XMM11);
+ x64_sse_shufps_reg_reg_imm(ins, X64_XMM10, X64_XMM8, 0xEE);
+ x64_sse_shufps_reg_reg_imm(ins, X64_XMM11, X64_XMM9, 0xEE);
+ x64_sse_movaps_memindex_reg(ins, X64_RDX, 0, X64_R12, 2, X64_XMM10);
+ x64_sse_movaps_memindex_reg(ins, X64_RDX, 16, X64_R12, 2, X64_XMM11);
+ x64_sse_movaps_reg_memindex(ins, X64_XMM15, X64_RSI, offsets[4], X64_RAX, 2);
+ x64_sse_movaps_reg_memindex(ins, X64_XMM12, X64_RSI, offsets[5], X64_RAX, 2);
+ x64_sse_movaps_reg_reg(ins, X64_XMM14, X64_XMM15);
+ x64_sse_movaps_reg_memindex(ins, X64_XMM4, X64_RSI, offsets[6], X64_RAX, 2);
+ x64_sse_addps_reg_reg(ins, X64_XMM14, X64_XMM12);
+ x64_sse_subps_reg_reg(ins, X64_XMM15, X64_XMM12);
+ x64_sse_movaps_reg_memindex(ins, X64_XMM13, X64_RSI, offsets[7], X64_RAX, 2);
+ x64_sse_movaps_reg_reg(ins, X64_XMM5, X64_XMM4);
+ x64_sse_movaps_reg_reg(ins, X64_XMM7, X64_XMM14);
+ x64_sse_addps_reg_reg(ins, X64_XMM5, X64_XMM13);
+ x64_sse_subps_reg_reg(ins, X64_XMM4, X64_XMM13);
+ x64_sse_movlhps_reg_reg(ins, X64_XMM2, X64_XMM8);
+ x64_sse_movaps_reg_reg(ins, X64_XMM8, X64_XMM5);
+ x64_sse_movlhps_reg_reg(ins, X64_XMM7, X64_XMM15);
+
+ /* change sign */
+ x64_sse_xorps_reg_reg(ins, X64_XMM15, X64_XMM3);
+
+ x64_sse_movaps_reg_reg(ins, X64_XMM6, X64_XMM5);
+ x64_sse_subps_reg_reg(ins, X64_XMM5, X64_XMM14);
+ x64_sse_addps_reg_reg(ins, X64_XMM6, X64_XMM14);
+ x64_sse_movlhps_reg_reg(ins, X64_XMM1, X64_XMM9);
+ x64_sse_movaps_reg_reg(ins, X64_XMM14, X64_XMM4);
+ x64_sse_movlhps_reg_reg(ins, X64_XMM8, X64_XMM4);
+ x64_sse_movaps_reg_reg(ins, X64_XMM12, X64_XMM1);
+ x64_sse_shufps_reg_reg_imm(ins, X64_XMM15, X64_XMM15, 0xB1);
+ x64_sse_movaps_reg_membase(ins, X64_XMM11, X64_R9, 48);
+ x64_alu_reg_imm_size(ins, X86_ADD, X64_RAX, 4, 8);
+ x64_sse_subps_reg_reg(ins, X64_XMM14, X64_XMM15);
+ x64_sse_movaps_reg_reg(ins, X64_XMM9, X64_XMM11);
+ x64_sse_mulps_reg_reg(ins, X64_XMM11, X64_XMM7);
+ x64_sse_addps_reg_reg(ins, X64_XMM4, X64_XMM15);
+ x64_sse_movaps_reg_membase(ins, X64_XMM15, X64_R9, 64);
+ x64_sse_shufps_reg_reg_imm(ins, X64_XMM7, X64_XMM7, 0xB1);
+ x64_sse_mulps_reg_reg(ins, X64_XMM9, X64_XMM8);
+ x64_sse_mulps_reg_reg(ins, X64_XMM7, X64_XMM15);
+ x64_sse_shufps_reg_reg_imm(ins, X64_XMM8, X64_XMM8, 0xB1);
+ x64_sse_subps_reg_reg(ins, X64_XMM11, X64_XMM7);
+ x64_sse_mulps_reg_reg(ins, X64_XMM8, X64_XMM15);
+ x64_sse_movaps_reg_reg(ins, X64_XMM10, X64_XMM11);
+ x64_sse_addps_reg_reg(ins, X64_XMM9, X64_XMM8);
+ x64_sse_shufps_reg_reg_imm(ins, X64_XMM6, X64_XMM14, 0xEE);
+ x64_sse_subps_reg_reg(ins, X64_XMM11, X64_XMM9);
+ x64_sse_addps_reg_reg(ins, X64_XMM10, X64_XMM9);
+
+ /* change sign */
+ x64_sse_xorps_reg_reg(ins, X64_XMM11, X64_XMM3);
+
+ x64_sse_movaps_reg_reg(ins, X64_XMM3, X64_XMM2);
+ x64_sse_shufps_reg_reg_imm(ins, X64_XMM11, X64_XMM11, 0xB1);
+ x64_sse_subps_reg_reg(ins, X64_XMM3, X64_XMM10);
+ x64_sse_addps_reg_reg(ins, X64_XMM2, X64_XMM10);
+ x64_sse_addps_reg_reg(ins, X64_XMM12, X64_XMM11);
+ x64_sse_subps_reg_reg(ins, X64_XMM1, X64_XMM11);
+ x64_sse_shufps_reg_reg_imm(ins, X64_XMM5, X64_XMM4, 0xEE);
+ x64_sse_movaps_memindex_reg(ins, X64_RDX, 48, X64_R12, 2, X64_XMM5);
+ x64_sse_movaps_memindex_reg(ins, X64_RDX, 32, X64_R12, 2, X64_XMM6);
+ x64_sse_movaps_memindex_reg(ins, X64_RDX, 0, X64_R11, 2, X64_XMM2);
+ x64_sse_movaps_memindex_reg(ins, X64_RDX, 16, X64_R11, 2, X64_XMM1);
+ x64_sse_movaps_memindex_reg(ins, X64_RDX, 32, X64_R11, 2, X64_XMM3);
+ x64_sse_movaps_memindex_reg(ins, X64_RDX, 48, X64_R11, 2, X64_XMM12);
+#endif
+
+ *fp = ins;
}
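
An editorial note for readers of the emitted kernels above: each "change sign" xorps followed by a shufps of the same register with immediate 0xB1 (which swaps the two floats inside every 64-bit lane) is a packed multiplication by +i or -i, depending on which sign mask the constant block loaded into XMM3 holds. A minimal intrinsics sketch of the idiom, assuming the mask used for one transform direction:

    #include <xmmintrin.h>

    /* With mask = {-0.0f, 0.0f, -0.0f, 0.0f}, the xor flips the real parts
       and shufps 0xB1 swaps re/im within each 64-bit pair, so each packed
       complex float (a, b) becomes (b, -a), i.e. (a + bi) * -i. */
    static __m128 mul_by_neg_i(__m128 x)
    {
        const __m128 mask = _mm_set_ps(0.0f, -0.0f, 0.0f, -0.0f);
        x = _mm_xor_ps(x, mask);
        return _mm_shuffle_ps(x, x, 0xB1);
    }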
-void PUSH(uint8_t **p, uint8_t reg) {
- if(reg >= 8) *(*p)++ = 0x41;
- *(*p)++ = 0x50 | (reg & 7);
+static FFTS_INLINE void
+generate_leaf_oe(insns_t **fp, uint32_t *offsets)
+{
+ /* emit through a local copy instead of deferring every access through *fp */
+ insns_t *ins = *fp;
+
+#ifdef _M_X64
+ x64_sse_movaps_reg_memindex(ins, X64_XMM6, X64_RDX, offsets[2], X64_RAX, 2);
+ x64_sse_movaps_reg_memindex(ins, X64_XMM8, X64_RDX, offsets[3], X64_RAX, 2);
+ x64_sse_movaps_reg_reg(ins, X64_XMM10, X64_XMM6);
+ x64_sse_shufps_reg_reg_imm(ins, X64_XMM10, X64_XMM8, 0xE4);
+ x64_sse_movaps_reg_reg(ins, X64_XMM9, X64_XMM10);
+ x64_sse_shufps_reg_reg_imm(ins, X64_XMM8, X64_XMM6, 0xE4);
+ x64_sse_movaps_reg_memindex(ins, X64_XMM12, X64_RDX, offsets[0], X64_RAX, 2);
+ x64_sse_movaps_reg_memindex(ins, X64_XMM7, X64_RDX, offsets[1], X64_RAX, 2);
+ x64_sse_movaps_reg_reg(ins, X64_XMM14, X64_XMM12);
+ x64_movsxd_reg_memindex(ins, X64_R10, X64_R9, 0, X64_RAX, 2);
+ x64_sse_addps_reg_reg(ins, X64_XMM9, X64_XMM8);
+ x64_sse_subps_reg_reg(ins, X64_XMM10, X64_XMM8);
+ x64_sse_addps_reg_reg(ins, X64_XMM14, X64_XMM7);
+ x64_sse_subps_reg_reg(ins, X64_XMM12, X64_XMM7);
+ x64_sse_movaps_reg_reg(ins, X64_XMM4, X64_XMM9);
+ x64_sse_movaps_reg_reg(ins, X64_XMM13, X64_XMM14);
+ x64_sse_shufps_reg_reg_imm(ins, X64_XMM4, X64_XMM10, 0xEE);
+
+ /* change sign */
+ x64_sse_xorps_reg_reg(ins, X64_XMM10, X64_XMM3);
+
+ x64_sse_shufps_reg_reg_imm(ins, X64_XMM10, X64_XMM10, 0xB1);
+ x64_sse_movaps_reg_reg(ins, X64_XMM11, X64_XMM12);
+ x64_sse_movaps_reg_reg(ins, X64_XMM5, X64_XMM14);
+ x64_sse_addps_reg_reg(ins, X64_XMM13, X64_XMM9);
+ x64_sse_subps_reg_reg(ins, X64_XMM11, X64_XMM10);
+ x64_sse_subps_reg_reg(ins, X64_XMM14, X64_XMM9);
+ x64_sse_shufps_reg_reg_imm(ins, X64_XMM5, X64_XMM12, 0xEE);
+ x64_sse_addps_reg_reg(ins, X64_XMM12, X64_XMM10);
+ x64_movsxd_reg_memindex(ins, X64_R11, X64_R9, 8, X64_RAX, 2);
+ x64_sse_movlhps_reg_reg(ins, X64_XMM13, X64_XMM11);
+ x64_sse_movaps_memindex_reg(ins, X64_R8, 0, X64_R10, 2, X64_XMM13);
+ x64_sse_movaps_reg_membase(ins, X64_XMM13, X64_RSI, 48);
+ x64_sse_movlhps_reg_reg(ins, X64_XMM14, X64_XMM12);
+ x64_sse_movaps_reg_reg(ins, X64_XMM1, X64_XMM13);
+ x64_sse_movaps_reg_membase(ins, X64_XMM12, X64_RSI, 64);
+ x64_sse_mulps_reg_reg(ins, X64_XMM13, X64_XMM5);
+ x64_sse_shufps_reg_reg_imm(ins, X64_XMM5, X64_XMM5, 0xB1);
+ x64_sse_mulps_reg_reg(ins, X64_XMM5, X64_XMM12);
+ x64_sse_movaps_memindex_reg(ins, X64_R8, 16, X64_R10, 2, X64_XMM14);
+ x64_sse_subps_reg_reg(ins, X64_XMM13, X64_XMM5);
+ x64_sse_mulps_reg_reg(ins, X64_XMM1, X64_XMM4);
+ x64_sse_shufps_reg_reg_imm(ins, X64_XMM4, X64_XMM4, 0xB1);
+ x64_sse_mulps_reg_reg(ins, X64_XMM4, X64_XMM12);
+ x64_sse_movaps_reg_memindex(ins, X64_XMM9, X64_RDX, offsets[4], X64_RAX, 2);
+ x64_sse_addps_reg_reg(ins, X64_XMM1, X64_XMM4);
+ x64_sse_movaps_reg_memindex(ins, X64_XMM7, X64_RDX, offsets[6], X64_RAX, 2);
+ x64_sse_movaps_reg_reg(ins, X64_XMM0, X64_XMM9);
+ x64_sse_movaps_reg_memindex(ins, X64_XMM2, X64_RDX, offsets[7], X64_RAX, 2);
+ x64_sse_movaps_reg_reg(ins, X64_XMM6, X64_XMM7);
+ x64_sse_movaps_reg_memindex(ins, X64_XMM15, X64_RDX, offsets[5], X64_RAX, 2);
+ x64_sse_movaps_reg_reg(ins, X64_XMM4, X64_XMM13);
+ x64_sse_subps_reg_reg(ins, X64_XMM7, X64_XMM2);
+ x64_sse_addps_reg_reg(ins, X64_XMM0, X64_XMM15);
+ x64_sse_subps_reg_reg(ins, X64_XMM9, X64_XMM15);
+ x64_sse_addps_reg_reg(ins, X64_XMM6, X64_XMM2);
+ x64_sse_subps_reg_reg(ins, X64_XMM13, X64_XMM1);
+ x64_sse_addps_reg_reg(ins, X64_XMM4, X64_XMM1);
+
+ /* change sign */
+ x64_sse_xorps_reg_reg(ins, X64_XMM7, X64_XMM3);
+
+ x64_alu_reg_imm_size(ins, X86_ADD, X64_RAX, 4, 8);
+ x64_sse_movaps_reg_reg(ins, X64_XMM2, X64_XMM0);
+ x64_sse_shufps_reg_reg_imm(ins, X64_XMM7, X64_XMM7, 0xB1);
+ x64_sse_movaps_reg_reg(ins, X64_XMM8, X64_XMM9);
+
+ /* change sign */
+ x64_sse_xorps_reg_reg(ins, X64_XMM13, X64_XMM3);
+
+ x64_sse_addps_reg_reg(ins, X64_XMM2, X64_XMM6);
+ x64_sse_subps_reg_reg(ins, X64_XMM8, X64_XMM7);
+ x64_sse_subps_reg_reg(ins, X64_XMM0, X64_XMM6);
+ x64_sse_addps_reg_reg(ins, X64_XMM9, X64_XMM7);
+ x64_sse_movaps_reg_reg(ins, X64_XMM10, X64_XMM2);
+ x64_sse_movaps_reg_reg(ins, X64_XMM11, X64_XMM0);
+ x64_sse_shufps_reg_reg_imm(ins, X64_XMM2, X64_XMM8, 0xEE);
+ x64_sse_shufps_reg_reg_imm(ins, X64_XMM0, X64_XMM9, 0xEE);
+ x64_sse_movaps_reg_reg(ins, X64_XMM14, X64_XMM2);
+ x64_sse_shufps_reg_reg_imm(ins, X64_XMM13, X64_XMM13, 0xB1);
+ x64_sse_subps_reg_reg(ins, X64_XMM14, X64_XMM4);
+ x64_sse_addps_reg_reg(ins, X64_XMM2, X64_XMM4);
+ x64_sse_movaps_reg_reg(ins, X64_XMM4, X64_XMM0);
+ x64_sse_subps_reg_reg(ins, X64_XMM0, X64_XMM13);
+ x64_sse_addps_reg_reg(ins, X64_XMM4, X64_XMM13);
+ x64_sse_movlhps_reg_reg(ins, X64_XMM10, X64_XMM8);
+ x64_sse_movlhps_reg_reg(ins, X64_XMM11, X64_XMM9);
+ x64_sse_movaps_memindex_reg(ins, X64_R8, 32, X64_R10, 2, X64_XMM10);
+ x64_sse_movaps_memindex_reg(ins, X64_R8, 48, X64_R10, 2, X64_XMM11);
+ x64_sse_movaps_memindex_reg(ins, X64_R8, 0, X64_R11, 2, X64_XMM2);
+ x64_sse_movaps_memindex_reg(ins, X64_R8, 16, X64_R11, 2, X64_XMM0);
+ x64_sse_movaps_memindex_reg(ins, X64_R8, 32, X64_R11, 2, X64_XMM14);
+ x64_sse_movaps_memindex_reg(ins, X64_R8, 48, X64_R11, 2, X64_XMM4);
+#else
+ x64_sse_movaps_reg_membase(ins, X64_XMM0, X64_R9, 0);
+ x64_sse_movaps_reg_memindex(ins, X64_XMM6, X64_RSI, offsets[2], X64_RAX, 2);
+ x64_sse_movaps_reg_memindex(ins, X64_XMM8, X64_RSI, offsets[3], X64_RAX, 2);
+ x64_sse_movaps_reg_reg(ins, X64_XMM10, X64_XMM6);
+ x64_sse_shufps_reg_reg_imm(ins, X64_XMM10, X64_XMM8, 0xE4);
+ x64_sse_movaps_reg_reg(ins, X64_XMM9, X64_XMM10);
+ x64_sse_shufps_reg_reg_imm(ins, X64_XMM8, X64_XMM6, 0xE4);
+ x64_sse_movaps_reg_memindex(ins, X64_XMM12, X64_RSI, offsets[0], X64_RAX, 2);
+ x64_sse_movaps_reg_memindex(ins, X64_XMM7, X64_RSI, offsets[1], X64_RAX, 2);
+ x64_sse_movaps_reg_reg(ins, X64_XMM14, X64_XMM12);
+ x64_movsxd_reg_memindex(ins, X64_R11, X64_R8, 0, X64_RAX, 2);
+ x64_sse_addps_reg_reg(ins, X64_XMM9, X64_XMM8);
+ x64_sse_subps_reg_reg(ins, X64_XMM10, X64_XMM8);
+ x64_sse_addps_reg_reg(ins, X64_XMM14, X64_XMM7);
+ x64_sse_subps_reg_reg(ins, X64_XMM12, X64_XMM7);
+ x64_sse_movaps_reg_reg(ins, X64_XMM4, X64_XMM9);
+ x64_sse_movaps_reg_reg(ins, X64_XMM13, X64_XMM14);
+ x64_sse_shufps_reg_reg_imm(ins, X64_XMM4, X64_XMM10, 0xEE);
+
+ /* change sign */
+ x64_sse_xorps_reg_reg(ins, X64_XMM10, X64_XMM0);
+
+ x64_sse_shufps_reg_reg_imm(ins, X64_XMM10, X64_XMM10, 0xB1);
+ x64_sse_movaps_reg_reg(ins, X64_XMM11, X64_XMM12);
+ x64_sse_movaps_reg_reg(ins, X64_XMM5, X64_XMM14);
+ x64_sse_addps_reg_reg(ins, X64_XMM13, X64_XMM9);
+ x64_sse_subps_reg_reg(ins, X64_XMM11, X64_XMM10);
+ x64_sse_subps_reg_reg(ins, X64_XMM14, X64_XMM9);
+ x64_sse_shufps_reg_reg_imm(ins, X64_XMM5, X64_XMM12, 0xEE);
+ x64_sse_addps_reg_reg(ins, X64_XMM12, X64_XMM10);
+ x64_movsxd_reg_memindex(ins, X64_R12, X64_R8, 8, X64_RAX, 2);
+ x64_sse_movlhps_reg_reg(ins, X64_XMM13, X64_XMM11);
+ x64_sse_movaps_memindex_reg(ins, X64_RDX, 0, X64_R11, 2, X64_XMM13);
+ x64_sse_movaps_reg_membase(ins, X64_XMM13, X64_R9, 48);
+ x64_sse_movlhps_reg_reg(ins, X64_XMM14, X64_XMM12);
+ x64_sse_movaps_reg_reg(ins, X64_XMM1, X64_XMM13);
+ x64_sse_movaps_reg_membase(ins, X64_XMM12, X64_R9, 64);
+ x64_sse_mulps_reg_reg(ins, X64_XMM13, X64_XMM5);
+ x64_sse_shufps_reg_reg_imm(ins, X64_XMM5, X64_XMM5, 0xB1);
+ x64_sse_mulps_reg_reg(ins, X64_XMM5, X64_XMM12);
+ x64_sse_movaps_memindex_reg(ins, X64_RDX, 16, X64_R11, 2, X64_XMM14);
+ x64_sse_subps_reg_reg(ins, X64_XMM13, X64_XMM5);
+ x64_sse_mulps_reg_reg(ins, X64_XMM1, X64_XMM4);
+ x64_sse_shufps_reg_reg_imm(ins, X64_XMM4, X64_XMM4, 0xB1);
+ x64_sse_mulps_reg_reg(ins, X64_XMM4, X64_XMM12);
+ x64_sse_movaps_reg_memindex(ins, X64_XMM9, X64_RSI, offsets[4], X64_RAX, 2);
+ x64_sse_addps_reg_reg(ins, X64_XMM1, X64_XMM4);
+ x64_sse_movaps_reg_memindex(ins, X64_XMM7, X64_RSI, offsets[6], X64_RAX, 2);
+ x64_sse_movaps_reg_reg(ins, X64_XMM3, X64_XMM9);
+ x64_sse_movaps_reg_memindex(ins, X64_XMM2, X64_RSI, offsets[7], X64_RAX, 2);
+ x64_sse_movaps_reg_reg(ins, X64_XMM6, X64_XMM7);
+ x64_sse_movaps_reg_memindex(ins, X64_XMM15, X64_RSI, offsets[5], X64_RAX, 2);
+ x64_sse_movaps_reg_reg(ins, X64_XMM4, X64_XMM13);
+ x64_sse_subps_reg_reg(ins, X64_XMM7, X64_XMM2);
+ x64_sse_addps_reg_reg(ins, X64_XMM3, X64_XMM15);
+ x64_sse_subps_reg_reg(ins, X64_XMM9, X64_XMM15);
+ x64_sse_addps_reg_reg(ins, X64_XMM6, X64_XMM2);
+ x64_sse_subps_reg_reg(ins, X64_XMM13, X64_XMM1);
+ x64_sse_addps_reg_reg(ins, X64_XMM4, X64_XMM1);
+
+ /* change sign */
+ x64_sse_xorps_reg_reg(ins, X64_XMM7, X64_XMM0);
+
+ x64_alu_reg_imm_size(ins, X86_ADD, X64_RAX, 4, 8);
+ x64_sse_movaps_reg_reg(ins, X64_XMM2, X64_XMM3);
+ x64_sse_shufps_reg_reg_imm(ins, X64_XMM7, X64_XMM7, 0xB1);
+ x64_sse_movaps_reg_reg(ins, X64_XMM8, X64_XMM9);
+
+ /* change sign */
+ x64_sse_xorps_reg_reg(ins, X64_XMM13, X64_XMM0);
+
+ x64_sse_addps_reg_reg(ins, X64_XMM2, X64_XMM6);
+ x64_sse_subps_reg_reg(ins, X64_XMM8, X64_XMM7);
+ x64_sse_subps_reg_reg(ins, X64_XMM3, X64_XMM6);
+ x64_sse_addps_reg_reg(ins, X64_XMM9, X64_XMM7);
+ x64_sse_movaps_reg_reg(ins, X64_XMM10, X64_XMM2);
+ x64_sse_movaps_reg_reg(ins, X64_XMM11, X64_XMM3);
+ x64_sse_shufps_reg_reg_imm(ins, X64_XMM2, X64_XMM8, 0xEE);
+ x64_sse_shufps_reg_reg_imm(ins, X64_XMM3, X64_XMM9, 0xEE);
+ x64_sse_movaps_reg_reg(ins, X64_XMM14, X64_XMM2);
+ x64_sse_shufps_reg_reg_imm(ins, X64_XMM13, X64_XMM13, 0xB1);
+ x64_sse_subps_reg_reg(ins, X64_XMM14, X64_XMM4);
+ x64_sse_addps_reg_reg(ins, X64_XMM2, X64_XMM4);
+ x64_sse_movaps_reg_reg(ins, X64_XMM4, X64_XMM3);
+ x64_sse_subps_reg_reg(ins, X64_XMM3, X64_XMM13);
+ x64_sse_addps_reg_reg(ins, X64_XMM4, X64_XMM13);
+ x64_sse_movlhps_reg_reg(ins, X64_XMM10, X64_XMM8);
+ x64_sse_movlhps_reg_reg(ins, X64_XMM11, X64_XMM9);
+ x64_sse_movaps_memindex_reg(ins, X64_RDX, 32, X64_R11, 2, X64_XMM10);
+ x64_sse_movaps_memindex_reg(ins, X64_RDX, 48, X64_R11, 2, X64_XMM11);
+ x64_sse_movaps_memindex_reg(ins, X64_RDX, 0, X64_R12, 2, X64_XMM2);
+ x64_sse_movaps_memindex_reg(ins, X64_RDX, 16, X64_R12, 2, X64_XMM3);
+ x64_sse_movaps_memindex_reg(ins, X64_RDX, 32, X64_R12, 2, X64_XMM14);
+ x64_sse_movaps_memindex_reg(ins, X64_RDX, 48, X64_R12, 2, X64_XMM4);
+#endif
+
+ *fp = ins;
}
-void POP(uint8_t **p, uint8_t reg) {
- if(reg >= 8) *(*p)++ = 0x41;
- *(*p)++ = 0x58 | (reg & 7);
+
+static FFTS_INLINE void
+generate_leaf_oo(insns_t **fp, uint32_t loop_count, uint32_t *offsets, int extend)
+{
+ insns_t *leaf_oo_loop;
+
+ /* emit through a local copy instead of deferring every access through *fp */
+ insns_t *ins = *fp;
+
+#ifdef _M_X64
+ /* align loop/jump destination */
+ x86_mov_reg_imm(ins, X86_EBX, loop_count);
+
+ /* beginning of the loop (make sure it's 16 byte aligned) */
+ leaf_oo_loop = ins;
+ assert(!(((uintptr_t) leaf_oo_loop) & 0xF));
+
+ x64_sse_movaps_reg_memindex(ins, X64_XMM4, X64_RDX, offsets[0], X64_RAX, 2);
+
+ x64_sse_movaps_reg_reg_size(ins, X64_XMM6, X64_XMM4, extend > 0);
+ extend--;
+
+ x64_sse_movaps_reg_memindex(ins, X64_XMM7, X64_RDX, offsets[1], X64_RAX, 2);
+ x64_sse_movaps_reg_memindex(ins, X64_XMM10, X64_RDX, offsets[2], X64_RAX, 2);
+
+ x64_sse_addps_reg_reg_size(ins, X64_XMM6, X64_XMM7, extend > 0);
+ extend--;
+
+ x64_sse_subps_reg_reg_size(ins, X64_XMM4, X64_XMM7, extend > 0);
+ extend--;
+
+ x64_sse_movaps_reg_memindex(ins, X64_XMM8, X64_RDX, offsets[3], X64_RAX, 2);
+ x64_sse_movaps_reg_reg(ins, X64_XMM9, X64_XMM10);
+ x64_sse_movaps_reg_memindex(ins, X64_XMM1, X64_RDX, offsets[4], X64_RAX, 2);
+
+ x64_sse_movaps_reg_reg_size(ins, X64_XMM5, X64_XMM6, extend > 0);
+ extend--;
+
+ x64_sse_movaps_reg_memindex(ins, X64_XMM11, X64_RDX, offsets[5], X64_RAX, 2);
+
+ x64_sse_movaps_reg_reg_size(ins, X64_XMM2, X64_XMM1, extend > 0);
+ extend--;
+
+ x64_sse_movaps_reg_memindex(ins, X64_XMM14, X64_RDX, offsets[6], X64_RAX, 2);
+ x64_sse_movaps_reg_reg(ins, X64_XMM15, X64_XMM4);
+ x64_sse_movaps_reg_memindex(ins, X64_XMM12, X64_RDX, offsets[7], X64_RAX, 2);
+ x64_sse_movaps_reg_reg(ins, X64_XMM13, X64_XMM14);
+ x64_movsxd_reg_memindex(ins, X64_R10, X64_R9, 0, X64_RAX, 2);
+ x64_sse_subps_reg_reg(ins, X64_XMM10, X64_XMM8);
+ x64_sse_addps_reg_reg(ins, X64_XMM9, X64_XMM8);
+ x64_sse_addps_reg_reg(ins, X64_XMM2, X64_XMM11);
+ x64_sse_subps_reg_reg(ins, X64_XMM14, X64_XMM12);
+ x64_sse_subps_reg_reg(ins, X64_XMM1, X64_XMM11);
+ x64_sse_addps_reg_reg(ins, X64_XMM13, X64_XMM12);
+
+ /* change sign */
+ x64_sse_xorps_reg_reg(ins, X64_XMM10, X64_XMM3);
+
+ x64_sse_addps_reg_reg(ins, X64_XMM5, X64_XMM9);
+ x64_sse_subps_reg_reg(ins, X64_XMM6, X64_XMM9);
+
+ /* change sign */
+ x64_sse_xorps_reg_reg(ins, X64_XMM14, X64_XMM3);
+
+ x64_sse_shufps_reg_reg_imm(ins, X64_XMM10, X64_XMM10, 0xB1);
+ x64_sse_movaps_reg_reg(ins, X64_XMM9, X64_XMM2);
+ x64_sse_shufps_reg_reg_imm(ins, X64_XMM14, X64_XMM14, 0xB1);
+
+ x64_sse_movaps_reg_reg_size(ins, X64_XMM7, X64_XMM6, extend > 0);
+ extend--;
+
+ x64_movsxd_reg_memindex(ins, X64_R11, X64_R9, 8, X64_RAX, 2);
+ x64_alu_reg_imm_size(ins, X86_ADD, X64_RAX, 4, 8);
+ x64_sse_addps_reg_reg(ins, X64_XMM4, X64_XMM10);
+ x64_sse_addps_reg_reg(ins, X64_XMM9, X64_XMM13);
+ x64_sse_subps_reg_reg(ins, X64_XMM2, X64_XMM13);
+ x64_sse_subps_reg_reg(ins, X64_XMM15, X64_XMM10);
+ x64_sse_movaps_reg_reg(ins, X64_XMM13, X64_XMM1);
+ x64_sse_movaps_reg_reg(ins, X64_XMM8, X64_XMM2);
+
+ x64_sse_movlhps_reg_reg_size(ins, X64_XMM7, X64_XMM4, extend > 0);
+ extend--;
+
+ x64_sse_subps_reg_reg(ins, X64_XMM13, X64_XMM14);
+ x64_sse_addps_reg_reg(ins, X64_XMM1, X64_XMM14);
+ x64_sse_shufps_reg_reg_imm(ins, X64_XMM6, X64_XMM4, 0xEE);
+ x64_sse_movaps_reg_reg(ins, X64_XMM14, X64_XMM5);
+ x64_sse_movaps_reg_reg(ins, X64_XMM4, X64_XMM9);
+ x64_sse_movlhps_reg_reg(ins, X64_XMM14, X64_XMM15);
+ x64_sse_movlhps_reg_reg(ins, X64_XMM4, X64_XMM13);
+ x64_sse_movlhps_reg_reg(ins, X64_XMM8, X64_XMM1);
+ x64_sse_shufps_reg_reg_imm(ins, X64_XMM5, X64_XMM15, 0xEE);
+ x64_sse_shufps_reg_reg_imm(ins, X64_XMM9, X64_XMM13, 0xEE);
+ x64_sse_shufps_reg_reg_imm(ins, X64_XMM2, X64_XMM1, 0xEE);
+ x64_sse_movaps_memindex_reg(ins, X64_R8, 0, X64_R10, 2, X64_XMM14);
+ x64_sse_movaps_memindex_reg(ins, X64_R8, 16, X64_R10, 2, X64_XMM7);
+ x64_sse_movaps_memindex_reg(ins, X64_R8, 32, X64_R10, 2, X64_XMM4);
+ x64_sse_movaps_memindex_reg(ins, X64_R8, 48, X64_R10, 2, X64_XMM8);
+ x64_sse_movaps_memindex_reg(ins, X64_R8, 0, X64_R11, 2, X64_XMM5);
+ x64_sse_movaps_memindex_reg(ins, X64_R8, 16, X64_R11, 2, X64_XMM6);
+ x64_sse_movaps_memindex_reg(ins, X64_R8, 32, X64_R11, 2, X64_XMM9);
+ x64_sse_movaps_memindex_reg(ins, X64_R8, 48, X64_R11, 2, X64_XMM2);
+
+ /* loop condition */
+ x64_alu_reg_reg_size(ins, X86_CMP, X64_RBX, X64_RAX, 8);
+ x64_branch_size(ins, X86_CC_NE, leaf_oo_loop, 0, 4);
+#else
+ /* align loop/jump destination */
+ x86_mov_reg_imm(ins, X86_ECX, loop_count);
+ ffts_align_mem16(&ins, 4);
+
+ x64_sse_movaps_reg_membase(ins, X64_XMM5, X64_R9, 0);
+
+ /* beginning of the loop (make sure it's 16 byte aligned) */
+ leaf_oo_loop = ins;
+ assert(!(((uintptr_t) leaf_oo_loop) & 0xF));
+
+ x64_sse_movaps_reg_memindex(ins, X64_XMM4, X64_RSI, offsets[0], X64_RAX, 2);
+
+ x64_sse_movaps_reg_reg_size(ins, X64_XMM6, X64_XMM4, extend > 0);
+ extend--;
+
+ x64_sse_movaps_reg_memindex(ins, X64_XMM7, X64_RSI, offsets[1], X64_RAX, 2);
+ x64_sse_movaps_reg_memindex(ins, X64_XMM10, X64_RSI, offsets[2], X64_RAX, 2);
+
+ x64_sse_addps_reg_reg_size(ins, X64_XMM6, X64_XMM7, extend > 0);
+ extend--;
+
+ x64_sse_subps_reg_reg_size(ins, X64_XMM4, X64_XMM7, extend > 0);
+ extend--;
+
+ x64_sse_movaps_reg_memindex(ins, X64_XMM8, X64_RSI, offsets[3], X64_RAX, 2);
+ x64_sse_movaps_reg_reg(ins, X64_XMM9, X64_XMM10);
+ x64_sse_movaps_reg_memindex(ins, X64_XMM1, X64_RSI, offsets[4], X64_RAX, 2);
+
+ x64_sse_movaps_reg_reg_size(ins, X64_XMM3, X64_XMM6, extend > 0);
+ extend--;
+
+ x64_sse_movaps_reg_memindex(ins, X64_XMM11, X64_RSI, offsets[5], X64_RAX, 2);
+
+ x64_sse_movaps_reg_reg_size(ins, X64_XMM2, X64_XMM1, extend > 0);
+ extend--;
+
+ x64_sse_movaps_reg_memindex(ins, X64_XMM14, X64_RSI, offsets[6], X64_RAX, 2);
+ x64_sse_movaps_reg_reg(ins, X64_XMM15, X64_XMM4);
+ x64_sse_movaps_reg_memindex(ins, X64_XMM12, X64_RSI, offsets[7], X64_RAX, 2);
+ x64_sse_movaps_reg_reg(ins, X64_XMM13, X64_XMM14);
+ x64_movsxd_reg_memindex(ins, X64_R11, X64_R8, 0, X64_RAX, 2);
+ x64_sse_subps_reg_reg(ins, X64_XMM10, X64_XMM8);
+ x64_sse_addps_reg_reg(ins, X64_XMM9, X64_XMM8);
+ x64_sse_addps_reg_reg(ins, X64_XMM2, X64_XMM11);
+ x64_sse_subps_reg_reg(ins, X64_XMM14, X64_XMM12);
+ x64_sse_subps_reg_reg(ins, X64_XMM1, X64_XMM11);
+ x64_sse_addps_reg_reg(ins, X64_XMM13, X64_XMM12);
+
+ /* change sign */
+ x64_sse_xorps_reg_reg(ins, X64_XMM10, X64_XMM5);
+
+ x64_sse_addps_reg_reg(ins, X64_XMM3, X64_XMM9);
+ x64_sse_subps_reg_reg(ins, X64_XMM6, X64_XMM9);
+
+ /* change sign */
+ x64_sse_xorps_reg_reg(ins, X64_XMM14, X64_XMM5);
+
+ x64_sse_shufps_reg_reg_imm(ins, X64_XMM10, X64_XMM10, 0xB1);
+ x64_sse_movaps_reg_reg(ins, X64_XMM9, X64_XMM2);
+ x64_sse_shufps_reg_reg_imm(ins, X64_XMM14, X64_XMM14, 0xB1);
+
+ x64_sse_movaps_reg_reg_size(ins, X64_XMM7, X64_XMM6, extend > 0);
+ extend--;
+
+ x64_movsxd_reg_memindex(ins, X64_R12, X64_R8, 8, X64_RAX, 2);
+ x64_alu_reg_imm_size(ins, X86_ADD, X64_RAX, 4, 8);
+ x64_sse_addps_reg_reg(ins, X64_XMM4, X64_XMM10);
+ x64_sse_addps_reg_reg(ins, X64_XMM9, X64_XMM13);
+ x64_sse_subps_reg_reg(ins, X64_XMM2, X64_XMM13);
+ x64_sse_subps_reg_reg(ins, X64_XMM15, X64_XMM10);
+ x64_sse_movaps_reg_reg(ins, X64_XMM13, X64_XMM1);
+ x64_sse_movaps_reg_reg(ins, X64_XMM8, X64_XMM2);
+
+ x64_sse_movlhps_reg_reg_size(ins, X64_XMM7, X64_XMM4, extend > 0);
+ extend--;
+
+ x64_sse_subps_reg_reg(ins, X64_XMM13, X64_XMM14);
+ x64_sse_addps_reg_reg(ins, X64_XMM1, X64_XMM14);
+ x64_sse_shufps_reg_reg_imm(ins, X64_XMM6, X64_XMM4, 0xEE);
+ x64_sse_movaps_reg_reg(ins, X64_XMM14, X64_XMM3);
+ x64_sse_movaps_reg_reg(ins, X64_XMM4, X64_XMM9);
+ x64_sse_movlhps_reg_reg(ins, X64_XMM14, X64_XMM15);
+ x64_sse_movlhps_reg_reg(ins, X64_XMM4, X64_XMM13);
+ x64_sse_movlhps_reg_reg(ins, X64_XMM8, X64_XMM1);
+ x64_sse_shufps_reg_reg_imm(ins, X64_XMM3, X64_XMM15, 0xEE);
+ x64_sse_shufps_reg_reg_imm(ins, X64_XMM9, X64_XMM13, 0xEE);
+ x64_sse_shufps_reg_reg_imm(ins, X64_XMM2, X64_XMM1, 0xEE);
+ x64_sse_movaps_memindex_reg(ins, X64_RDX, 0, X64_R11, 2, X64_XMM14);
+ x64_sse_movaps_memindex_reg(ins, X64_RDX, 16, X64_R11, 2, X64_XMM7);
+ x64_sse_movaps_memindex_reg(ins, X64_RDX, 32, X64_R11, 2, X64_XMM4);
+ x64_sse_movaps_memindex_reg(ins, X64_RDX, 48, X64_R11, 2, X64_XMM8);
+ x64_sse_movaps_memindex_reg(ins, X64_RDX, 0, X64_R12, 2, X64_XMM3);
+ x64_sse_movaps_memindex_reg(ins, X64_RDX, 16, X64_R12, 2, X64_XMM6);
+ x64_sse_movaps_memindex_reg(ins, X64_RDX, 32, X64_R12, 2, X64_XMM9);
+ x64_sse_movaps_memindex_reg(ins, X64_RDX, 48, X64_R12, 2, X64_XMM2);
+
+ /* loop condition */
+ x64_alu_reg_reg_size(ins, X86_CMP, X64_RCX, X64_RAX, 8);
+ x64_branch_size(ins, X86_CC_NE, leaf_oo_loop, 0, 4);
+#endif
+
+ *fp = ins;
}
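
A note on the extend parameter above: each *_size emitter variant takes a flag (extend > 0, with extend decremented after every use), and the counter appears to select longer but functionally equivalent encodings for a fixed number of instructions, stretching the code so that later entry points reach their asserted 16-byte alignment without putting nops in the hot path. A sketch of one way such stretching can work on x86 (an illustration, not the library's actual emitter): MOVAPS between registers is 0F 28 /r, and a 66 prefix yields the one-byte-longer MOVAPD, which moves the same 128 bits.

    #include <stdint.h>

    /* Emit MOVAPS xmm_dst, xmm_src (xmm0..xmm7 only; higher registers need
       a REX prefix), optionally widened by one byte via the 66 prefix. */
    static uint8_t *emit_movaps_rr(uint8_t *p, unsigned dst, unsigned src, int widen)
    {
        if (widen)
            *p++ = 0x66;                 /* 66 0F 28 /r decodes as MOVAPD */
        *p++ = 0x0F;
        *p++ = 0x28;                     /* 0F 28 /r decodes as MOVAPS    */
        *p++ = (uint8_t)(0xC0 | (dst << 3) | src); /* ModRM reg-reg form  */
        return p;
    }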
+static FFTS_INLINE insns_t*
+generate_size8_base_case(insns_t **fp, int sign)
+{
+ insns_t *ins;
+ insns_t *x8_addr;
+ insns_t *x8_soft_loop;
+
+ /* unreferenced parameter */
+ (void) sign;
+
+ /* emit through a local copy instead of deferring every access through *fp */
+ ins = *fp;
+
+ /* align call destination */
+ ffts_align_mem16(&ins, 0);
+ x8_addr = ins;
+
+ /* align loop/jump destination */
+#ifdef _M_X64
+ ffts_align_mem16(&ins, 6);
+#else
+ ffts_align_mem16(&ins, 5);
+#endif
+
+#ifdef _M_X64
+ /* generate function */
+
+ /* input */
+ x64_mov_reg_reg(ins, X64_RAX, X64_R9, 8);
+
+ /* output */
+ x64_mov_reg_reg(ins, X64_RCX, X64_R8, 8);
+
+ /* loop stop (RDX = output + output_stride) */
+ x64_lea_memindex(ins, X64_RDX, X64_R8, 0, X64_RBX, 0);
+
+ /* RSI = 3 * output_stride */
+ x64_lea_memindex(ins, X64_RSI, X64_RBX, 0, X64_RBX, 1);
+
+ /* R10 = 5 * output_stride */
+ x64_lea_memindex(ins, X64_R10, X64_RBX, 0, X64_RBX, 2);
+
+ /* R11 = 7 * output_stride */
+ x64_lea_memindex(ins, X64_R11, X64_RSI, 0, X64_RBX, 2);
+
+ /* beginning of the loop (make sure it's 16 byte aligned) */
+ x8_soft_loop = ins;
+ assert(!(((uintptr_t) x8_soft_loop) & 0xF));
+
+ /* load [input + 0 * input_stride] */
+ x64_sse_movaps_reg_membase(ins, X64_XMM9, X64_RAX, 0);
+
+ /* load [output + 2 * output_stride] */
+ x64_sse_movaps_reg_memindex(ins, X64_XMM6, X64_RCX, 0, X64_RBX, 1);
+
+ x64_sse_movaps_reg_reg(ins, X64_XMM11, X64_XMM9);
+
+ /* load [output + 3 * output_stride] */
+ x64_sse_movaps_reg_memindex(ins, X64_XMM7, X64_RCX, 0, X64_RSI, 0);
+
+ /* load [input + 1 * input_stride] */
+ x64_sse_movaps_reg_membase(ins, X64_XMM8, X64_RAX, 16);
+
+ x64_sse_mulps_reg_reg(ins, X64_XMM11, X64_XMM6);
+ x64_sse_mulps_reg_reg(ins, X64_XMM9, X64_XMM7);
+ x64_sse_shufps_reg_reg_imm(ins, X64_XMM6, X64_XMM6, 0xB1);
+ x64_sse_mulps_reg_reg(ins, X64_XMM6, X64_XMM8);
+ x64_sse_shufps_reg_reg_imm(ins, X64_XMM7, X64_XMM7, 0xB1);
+ x64_sse_subps_reg_reg(ins, X64_XMM11, X64_XMM6);
+ x64_sse_mulps_reg_reg(ins, X64_XMM8, X64_XMM7);
+ x64_sse_movaps_reg_reg(ins, X64_XMM10, X64_XMM11);
+ x64_sse_addps_reg_reg(ins, X64_XMM9, X64_XMM8);
+
+ /* load [input + 2 * input_stride] */
+ x64_sse_movaps_reg_membase(ins, X64_XMM15, X64_RAX, 32);
+
+ x64_sse_addps_reg_reg(ins, X64_XMM10, X64_XMM9);
+ x64_sse_subps_reg_reg(ins, X64_XMM11, X64_XMM9);
+
+ /* load [output + 0 * output_stride] */
+ x64_sse_movaps_reg_membase(ins, X64_XMM5, X64_RCX, 0);
+
+ x64_sse_movaps_reg_reg(ins, X64_XMM6, X64_XMM15);
+
+ /* load [output + 4 * output_stride] */
+ x64_sse_movaps_reg_memindex(ins, X64_XMM12, X64_RCX, 0, X64_RBX, 2);
+
+ x64_sse_movaps_reg_reg(ins, X64_XMM2, X64_XMM5);
+
+ /* load [output + 6 * output_stride] */
+ x64_sse_movaps_reg_memindex(ins, X64_XMM13, X64_RCX, 0, X64_RSI, 1);
+
+ /* change sign */
+ x64_sse_xorps_reg_reg(ins, X64_XMM11, X64_XMM3);
+
+ /* load [input + 3 * input_stride] */
+ x64_sse_movaps_reg_membase(ins, X64_XMM14, X64_RAX, 48);
+
+ x64_sse_subps_reg_reg(ins, X64_XMM2, X64_XMM10);
+ x64_sse_mulps_reg_reg(ins, X64_XMM6, X64_XMM12);
+ x64_sse_addps_reg_reg(ins, X64_XMM5, X64_XMM10);
+ x64_sse_mulps_reg_reg(ins, X64_XMM15, X64_XMM13);
+
+ /* load [input + 4 * input_stride] */
+ x64_sse_movaps_reg_membase(ins, X64_XMM10, X64_RAX, 64);
+
+ x64_sse_movaps_reg_reg(ins, X64_XMM0, X64_XMM5);
+ x64_sse_shufps_reg_reg_imm(ins, X64_XMM12, X64_XMM12, 0xB1);
+ x64_sse_shufps_reg_reg_imm(ins, X64_XMM13, X64_XMM13, 0xB1);
+ x64_sse_mulps_reg_reg(ins, X64_XMM12, X64_XMM14);
+ x64_sse_mulps_reg_reg(ins, X64_XMM14, X64_XMM13);
+ x64_sse_subps_reg_reg(ins, X64_XMM6, X64_XMM12);
+ x64_sse_addps_reg_reg(ins, X64_XMM15, X64_XMM14);
+
+ /* load [output + 5 * output_stride] */
+ x64_sse_movaps_reg_memindex(ins, X64_XMM7, X64_RCX, 0, X64_R10, 0);
+
+ x64_sse_movaps_reg_reg(ins, X64_XMM13, X64_XMM10);
+
+ /* load [output + 7 * output_stride] */
+ x64_sse_movaps_reg_memindex(ins, X64_XMM8, X64_RCX, 0, X64_R11, 0);
+
+ x64_sse_movaps_reg_reg(ins, X64_XMM12, X64_XMM6);
+
+ /* load [input + 5 * input_stride] */
+ x64_sse_movaps_reg_membase(ins, X64_XMM9, X64_RAX, 80);
+
+ /* move input by 6 * input_stride */
+ x64_alu_reg_imm_size(ins, X86_ADD, X64_RAX, 0x60, 8);
+
+ x64_sse_mulps_reg_reg(ins, X64_XMM13, X64_XMM7);
+ x64_sse_subps_reg_reg(ins, X64_XMM6, X64_XMM15);
+ x64_sse_addps_reg_reg(ins, X64_XMM12, X64_XMM15);
+ x64_sse_mulps_reg_reg(ins, X64_XMM10, X64_XMM8);
+ x64_sse_subps_reg_reg(ins, X64_XMM0, X64_XMM12);
+ x64_sse_addps_reg_reg(ins, X64_XMM5, X64_XMM12);
+ x64_sse_shufps_reg_reg_imm(ins, X64_XMM7, X64_XMM7, 0xB1);
+
+ /* change sign */
+ x64_sse_xorps_reg_reg(ins, X64_XMM6, X64_XMM3);
+
+ x64_sse_shufps_reg_reg_imm(ins, X64_XMM8, X64_XMM8, 0xB1);
+ x64_sse_movaps_reg_reg(ins, X64_XMM12, X64_XMM2);
+ x64_sse_mulps_reg_reg(ins, X64_XMM7, X64_XMM9);
+ x64_sse_mulps_reg_reg(ins, X64_XMM9, X64_XMM8);
+ x64_sse_subps_reg_reg(ins, X64_XMM13, X64_XMM7);
+ x64_sse_addps_reg_reg(ins, X64_XMM10, X64_XMM9);
+
+ /* load [output + 1 * output_stride] */
+ x64_sse_movaps_reg_memindex(ins, X64_XMM4, X64_RCX, 0, X64_RBX, 0);
+
+ x64_sse_shufps_reg_reg_imm(ins, X64_XMM11, X64_XMM11, 0xB1);
+ x64_sse_movaps_reg_reg(ins, X64_XMM1, X64_XMM4);
+ x64_sse_shufps_reg_reg_imm(ins, X64_XMM6, X64_XMM6, 0xB1);
+ x64_sse_addps_reg_reg(ins, X64_XMM1, X64_XMM11);
+ x64_sse_subps_reg_reg(ins, X64_XMM4, X64_XMM11);
+ x64_sse_addps_reg_reg(ins, X64_XMM12, X64_XMM6);
+ x64_sse_subps_reg_reg(ins, X64_XMM2, X64_XMM6);
+ x64_sse_movaps_reg_reg(ins, X64_XMM11, X64_XMM13);
+ x64_sse_movaps_reg_reg(ins, X64_XMM14, X64_XMM4);
+ x64_sse_movaps_reg_reg(ins, X64_XMM6, X64_XMM1);
+ x64_sse_subps_reg_reg(ins, X64_XMM13, X64_XMM10);
+ x64_sse_addps_reg_reg(ins, X64_XMM11, X64_XMM10);
+
+ /* change sign */
+ x64_sse_xorps_reg_reg(ins, X64_XMM13, X64_XMM3);
+
+ x64_sse_addps_reg_reg(ins, X64_XMM4, X64_XMM11);
+ x64_sse_subps_reg_reg(ins, X64_XMM14, X64_XMM11);
+ x64_sse_shufps_reg_reg_imm(ins, X64_XMM13, X64_XMM13, 0xB1);
+
+ /* store [output + 0 * output_stride] */
+ x64_sse_movaps_membase_reg(ins, X64_RCX, 0, X64_XMM5);
+
+ /* store [output + 1 * output_stride] */
+ x64_sse_movaps_memindex_reg(ins, X64_RCX, 0, X64_RBX, 0, X64_XMM4);
+
+ /* store [output + 2 * output_stride] */
+ x64_sse_movaps_memindex_reg(ins, X64_RCX, 0, X64_RBX, 1, X64_XMM2);
+
+ x64_sse_subps_reg_reg(ins, X64_XMM1, X64_XMM13);
+ x64_sse_addps_reg_reg(ins, X64_XMM6, X64_XMM13);
+
+ /* store [output + 3 * output_stride] */
+ x64_sse_movaps_memindex_reg(ins, X64_RCX, 0, X64_RSI, 0, X64_XMM1);
+
+ /* store [output + 4 * output_stride] */
+ x64_sse_movaps_memindex_reg(ins, X64_RCX, 0, X64_RBX, 2, X64_XMM0);
+
+ /* store [output + 5 * output_stride] */
+ x64_sse_movaps_memindex_reg(ins, X64_RCX, 0, X64_R10, 0, X64_XMM14);
+
+ /* store [output + 6 * output_stride] */
+ x64_sse_movaps_memindex_reg(ins, X64_RCX, 0, X64_RSI, 1, X64_XMM12);
+
+ /* store [output + 7 * output_stride] */
+ x64_sse_movaps_memindex_reg(ins, X64_RCX, 0, X64_R11, 0, X64_XMM6);
+
+ /* move output by 16 */
+ x64_alu_reg_imm_size(ins, X86_ADD, X64_RCX, 16, 8);
+
+ /* loop condition */
+ x64_alu_reg_reg_size(ins, X86_CMP, X64_RCX, X64_RDX, 8);
+ x64_branch_size(ins, X86_CC_NE, x8_soft_loop, 0, 4);
+
+ x64_ret(ins);
+#else
+ /* generate function */
+ x86_clear_reg(ins, X86_EAX);
+ x64_mov_reg_reg(ins, X64_RBX, X64_RDX, 8);
+ x64_mov_reg_reg(ins, X64_RSI, X64_R8, 8);
+
+ x64_lea_memindex(ins, X64_R9, X64_RDX, 0, X64_RCX, 2);
+ x64_lea_memindex(ins, X64_R10, X64_R9, 0, X64_RCX, 2);
+ x64_lea_memindex(ins, X64_R11, X64_R10, 0, X64_RCX, 2);
+ x64_lea_memindex(ins, X64_R12, X64_R11, 0, X64_RCX, 2);
+ x64_lea_memindex(ins, X64_R13, X64_R12, 0, X64_RCX, 2);
+ x64_lea_memindex(ins, X64_R14, X64_R13, 0, X64_RCX, 2);
+ x64_lea_memindex(ins, X64_R15, X64_R14, 0, X64_RCX, 2);
+
+ /* beginning of the loop (make sure it's 16 byte aligned) */
+ x8_soft_loop = ins;
+ assert(!(((uintptr_t) x8_soft_loop) & 0xF));
+
+ x64_sse_movaps_reg_membase(ins, X64_XMM9, X64_RSI, 0);
+ x64_sse_movaps_reg_memindex(ins, X64_XMM6, X64_R10, 0, X64_RAX, 2);
+ x64_sse_movaps_reg_reg(ins, X64_XMM11, X64_XMM9);
+ x64_sse_movaps_reg_memindex(ins, X64_XMM7, X64_R11, 0, X64_RAX, 2);
+ x64_sse_movaps_reg_membase(ins, X64_XMM8, X64_RSI, 16);
+ x64_sse_mulps_reg_reg(ins, X64_XMM11, X64_XMM6);
+ x64_sse_mulps_reg_reg(ins, X64_XMM9, X64_XMM7);
+ x64_sse_shufps_reg_reg_imm(ins, X64_XMM6, X64_XMM6, 0xB1);
+ x64_sse_mulps_reg_reg(ins, X64_XMM6, X64_XMM8);
+ x64_sse_shufps_reg_reg_imm(ins, X64_XMM7, X64_XMM7, 0xB1);
+ x64_sse_subps_reg_reg(ins, X64_XMM11, X64_XMM6);
+ x64_sse_mulps_reg_reg(ins, X64_XMM8, X64_XMM7);
+ x64_sse_movaps_reg_reg(ins, X64_XMM10, X64_XMM11);
+ x64_sse_addps_reg_reg(ins, X64_XMM9, X64_XMM8);
+ x64_sse_movaps_reg_membase(ins, X64_XMM15, X64_RSI, 32);
+ x64_sse_addps_reg_reg(ins, X64_XMM10, X64_XMM9);
+ x64_sse_subps_reg_reg(ins, X64_XMM11, X64_XMM9);
+ x64_sse_movaps_reg_memindex(ins, X64_XMM5, X64_RBX, 0, X64_RAX, 2);
+ x64_sse_movaps_reg_reg(ins, X64_XMM6, X64_XMM15);
+ x64_sse_movaps_reg_memindex(ins, X64_XMM12, X64_R12, 0, X64_RAX, 2);
+ x64_sse_movaps_reg_reg(ins, X64_XMM2, X64_XMM5);
+ x64_sse_movaps_reg_memindex(ins, X64_XMM13, X64_R14, 0, X64_RAX, 2);
+
+ /* change sign */
+ x64_sse_xorps_reg_reg(ins, X64_XMM11, X64_XMM3);
+
+ x64_sse_movaps_reg_membase(ins, X64_XMM14, X64_RSI, 48);
+ x64_sse_subps_reg_reg(ins, X64_XMM2, X64_XMM10);
+ x64_sse_mulps_reg_reg(ins, X64_XMM6, X64_XMM12);
+ x64_sse_addps_reg_reg(ins, X64_XMM5, X64_XMM10);
+ x64_sse_mulps_reg_reg(ins, X64_XMM15, X64_XMM13);
+ x64_sse_movaps_reg_membase(ins, X64_XMM10, X64_RSI, 64);
+ x64_sse_movaps_reg_reg(ins, X64_XMM0, X64_XMM5);
+ x64_sse_shufps_reg_reg_imm(ins, X64_XMM12, X64_XMM12, 0xB1);
+ x64_sse_shufps_reg_reg_imm(ins, X64_XMM13, X64_XMM13, 0xB1);
+ x64_sse_mulps_reg_reg(ins, X64_XMM12, X64_XMM14);
+ x64_sse_mulps_reg_reg(ins, X64_XMM14, X64_XMM13);
+ x64_sse_subps_reg_reg(ins, X64_XMM6, X64_XMM12);
+ x64_sse_addps_reg_reg(ins, X64_XMM15, X64_XMM14);
+ x64_sse_movaps_reg_memindex(ins, X64_XMM7, X64_R13, 0, X64_RAX, 2);
+ x64_sse_movaps_reg_reg(ins, X64_XMM13, X64_XMM10);
+ x64_sse_movaps_reg_memindex(ins, X64_XMM8, X64_R15, 0, X64_RAX, 2);
+ x64_sse_movaps_reg_reg(ins, X64_XMM12, X64_XMM6);
+ x64_sse_movaps_reg_membase(ins, X64_XMM9, X64_RSI, 80);
+ x64_alu_reg_imm_size(ins, X86_ADD, X64_RSI, 0x60, 8);
+ x64_sse_mulps_reg_reg(ins, X64_XMM13, X64_XMM7);
+ x64_sse_subps_reg_reg(ins, X64_XMM6, X64_XMM15);
+ x64_sse_addps_reg_reg(ins, X64_XMM12, X64_XMM15);
+ x64_sse_mulps_reg_reg(ins, X64_XMM10, X64_XMM8);
+ x64_sse_subps_reg_reg(ins, X64_XMM0, X64_XMM12);
+ x64_sse_addps_reg_reg(ins, X64_XMM5, X64_XMM12);
+ x64_sse_shufps_reg_reg_imm(ins, X64_XMM7, X64_XMM7, 0xB1);
+
+ /* change sign */
+ x64_sse_xorps_reg_reg(ins, X64_XMM6, X64_XMM3);
+
+ x64_sse_shufps_reg_reg_imm(ins, X64_XMM8, X64_XMM8, 0xB1);
+ x64_sse_movaps_reg_reg(ins, X64_XMM12, X64_XMM2);
+ x64_sse_mulps_reg_reg(ins, X64_XMM7, X64_XMM9);
+ x64_sse_mulps_reg_reg(ins, X64_XMM9, X64_XMM8);
+ x64_sse_subps_reg_reg(ins, X64_XMM13, X64_XMM7);
+ x64_sse_addps_reg_reg(ins, X64_XMM10, X64_XMM9);
+ x64_sse_movaps_reg_memindex(ins, X64_XMM4, X64_R9, 0, X64_RAX, 2);
+ x64_sse_shufps_reg_reg_imm(ins, X64_XMM11, X64_XMM11, 0xB1);
+ x64_sse_movaps_reg_reg(ins, X64_XMM1, X64_XMM4);
+ x64_sse_shufps_reg_reg_imm(ins, X64_XMM6, X64_XMM6, 0xB1);
+ x64_sse_addps_reg_reg(ins, X64_XMM1, X64_XMM11);
+ x64_sse_subps_reg_reg(ins, X64_XMM4, X64_XMM11);
+ x64_sse_addps_reg_reg(ins, X64_XMM12, X64_XMM6);
+ x64_sse_subps_reg_reg(ins, X64_XMM2, X64_XMM6);
+ x64_sse_movaps_reg_reg(ins, X64_XMM11, X64_XMM13);
+ x64_sse_movaps_reg_reg(ins, X64_XMM14, X64_XMM4);
+ x64_sse_movaps_reg_reg(ins, X64_XMM6, X64_XMM1);
+ x64_sse_subps_reg_reg(ins, X64_XMM13, X64_XMM10);
+ x64_sse_addps_reg_reg(ins, X64_XMM11, X64_XMM10);
+
+ /* change sign */
+ x64_sse_xorps_reg_reg(ins, X64_XMM13, X64_XMM3);
+
+ x64_sse_addps_reg_reg(ins, X64_XMM4, X64_XMM11);
+ x64_sse_subps_reg_reg(ins, X64_XMM14, X64_XMM11);
+ x64_sse_shufps_reg_reg_imm(ins, X64_XMM13, X64_XMM13, 0xB1);
+ x64_sse_movaps_memindex_reg(ins, X64_RBX, 0, X64_RAX, 2, X64_XMM5);
+ x64_sse_movaps_memindex_reg(ins, X64_R9, 0, X64_RAX, 2, X64_XMM4);
+ x64_sse_movaps_memindex_reg(ins, X64_R10, 0, X64_RAX, 2, X64_XMM2);
+ x64_sse_subps_reg_reg(ins, X64_XMM1, X64_XMM13);
+ x64_sse_addps_reg_reg(ins, X64_XMM6, X64_XMM13);
+ x64_sse_movaps_memindex_reg(ins, X64_R11, 0, X64_RAX, 2, X64_XMM1);
+ x64_sse_movaps_memindex_reg(ins, X64_R12, 0, X64_RAX, 2, X64_XMM0);
+ x64_sse_movaps_memindex_reg(ins, X64_R13, 0, X64_RAX, 2, X64_XMM14);
+ x64_sse_movaps_memindex_reg(ins, X64_R14, 0, X64_RAX, 2, X64_XMM12);
+ x64_sse_movaps_memindex_reg(ins, X64_R15, 0, X64_RAX, 2, X64_XMM6);
+ x64_alu_reg_imm_size(ins, X86_ADD, X64_RAX, 4, 8);
+
+ /* loop condition */
+ x64_alu_reg_reg_size(ins, X86_CMP, X64_RCX, X64_RAX, 8);
+ x64_branch_size(ins, X86_CC_NE, x8_soft_loop, 0, 4);
+ x64_ret(ins);
#endif
+
+ *fp = ins;
+ return x8_addr;
+}
+
+#endif /* FFTS_CODEGEN_SSE_H */
\ No newline at end of file
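
For orientation, the mulps/shufps(0xB1)/subps/addps runs inside the size-8 loop above are interleaved complex multiplies against twiddle factors stored as one duplicated-real vector plus one duplicated, sign-twiddled imaginary vector, the layout built by the LUT generator in ffts.c below. A sketch in intrinsics, assuming the imaginary vector already carries the direction-dependent signs:

    #include <xmmintrin.h>

    /* x packs two interleaved complex floats (a, b, a', b'); w_re = {c, c, ...}
       and w_im = {-d, d, ...} as prepared at LUT-generation time. The add form
       yields x * w; emitting subps instead yields x * conj(w), which is why the
       generated loop computes one of each for the paired outputs. */
    static __m128 cmul(__m128 x, __m128 w_re, __m128 w_im)
    {
        __m128 sw = _mm_shuffle_ps(x, x, 0xB1);   /* swap re/im in each pair */
        return _mm_add_ps(_mm_mul_ps(x, w_re), _mm_mul_ps(sw, w_im));
    }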
diff --git a/lib/ffts/src/ffts.c b/lib/ffts/src/ffts.c
index bec2177..5d72a52 100644
--- a/lib/ffts/src/ffts.c
+++ b/lib/ffts/src/ffts.c
@@ -1,398 +1,539 @@
/*
-
- This file is part of FFTS -- The Fastest Fourier Transform in the South
-
- Copyright (c) 2012, Anthony M. Blake <amb@anthonix.com>
- Copyright (c) 2012, The University of Waikato
-
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions are met:
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in the
- documentation and/or other materials provided with the distribution.
- * Neither the name of the organization nor the
- names of its contributors may be used to endorse or promote products
- derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
- ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- DISCLAIMED. IN NO EVENT SHALL ANTHONY M. BLAKE BE LIABLE FOR ANY
- DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+This file is part of FFTS -- The Fastest Fourier Transform in the South
+
+Copyright (c) 2012, Anthony M. Blake <amb@anthonix.com>
+Copyright (c) 2012, The University of Waikato
+
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+* Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+* Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in the
+documentation and/or other materials provided with the distribution.
+* Neither the name of the organization nor the
+names of its contributors may be used to endorse or promote products
+derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL ANTHONY M. BLAKE BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
+
#include "ffts.h"
+
+#include "ffts_internal.h"
+#include "ffts_static.h"
+#include "ffts_trig.h"
#include "macros.h"
-//#include "mini_macros.h"
#include "patterns.h"
-#include "ffts_small.h"
-#ifdef DYNAMIC_DISABLED
- #include "ffts_static.h"
+#ifndef DYNAMIC_DISABLED
+#include "codegen.h"
+#endif
+
+#if _WIN32
+#include <windows.h>
+#else
+#if __APPLE__
+#include <libkern/OSCacheControl.h>
+#endif
+
+#if HAVE_SYS_MMAN_H
+#include <sys/mman.h>
+#endif
+#endif
+
+#if defined(HAVE_NEON)
+static const FFTS_ALIGN(64) float w_data[16] = {
+ 0.70710678118654757273731092936941f,
+ 0.70710678118654746171500846685376f,
+ -0.70710678118654757273731092936941f,
+ -0.70710678118654746171500846685376f,
+ 1.0f,
+ 0.70710678118654757273731092936941f,
+ -0.0f,
+ -0.70710678118654746171500846685376f,
+ 0.70710678118654757273731092936941f,
+ 0.70710678118654746171500846685376f,
+ 0.70710678118654757273731092936941f,
+ 0.70710678118654746171500846685376f,
+ 1.0f,
+ 0.70710678118654757273731092936941f,
+ 0.0f,
+ 0.70710678118654746171500846685376f
+};
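+
+/* The table packs +/-1/sqrt(2), i.e. cos and sin of pi/4, in two layouts:
+   entries 0-7 serve the forward (sign < 0) NEON leaf kernels and entries
+   8-15 the inverse ones; ffts_generate_luts() points the plan's
+   oe_ws/ee_ws/eo_ws at the appropriate offsets. */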
+#endif
+
+static FFTS_INLINE int ffts_allow_execute(void *start, size_t len)
+{
+ int result;
+
+#ifdef _WIN32
+ DWORD old_protect;
+ result = !VirtualProtect(start, len, PAGE_EXECUTE_READ, &old_protect);
#else
- #include "codegen.h"
+ result = mprotect(start, len, PROT_READ | PROT_EXEC);
#endif
-#include <errno.h>
- #include <sys/mman.h>
- #include <string.h>
- #include <limits.h> /* for PAGESIZE */
+ return result;
+}
+
+static FFTS_INLINE int ffts_deny_execute(void *start, size_t len)
+{
+ int result;
+#ifdef _WIN32
+ DWORD old_protect;
+ result = !VirtualProtect(start, len, PAGE_READWRITE, &old_protect); /* 0 on success, matching mprotect */
+#else
+ result = mprotect(start, len, PROT_READ | PROT_WRITE);
+#endif
+
+ return result;
+}
+
+static FFTS_INLINE int ffts_flush_instruction_cache(void *start, size_t length)
+{
+#ifdef _WIN32
+ return !FlushInstructionCache(GetCurrentProcess(), start, length);
+#else
+#ifdef __APPLE__
+ sys_icache_invalidate(start, length);
+#elif __ANDROID__
+ cacheflush((long) start, (long) start + length, 0);
+#elif __linux__
+#if GCC_VERSION_AT_LEAST(4,3)
+ __builtin___clear_cache(start, (char*) start + length);
+#elif __GNUC__
+ __clear_cache((long) start, (long) start + length);
+#endif
+#endif
+ return 0;
+#endif
+}
+
+static FFTS_INLINE void *ffts_vmem_alloc(size_t length)
+{
#if __APPLE__
- #include <libkern/OSCacheControl.h>
+ return mmap(NULL, length, PROT_READ | PROT_WRITE, MAP_ANON | MAP_SHARED, -1, 0);
+#elif _WIN32
+ return VirtualAlloc(NULL, length, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
#else
+#ifndef MAP_ANONYMOUS
+#define MAP_ANONYMOUS 0x20
#endif
-void ffts_execute(ffts_plan_t *p, const void * in, void * out) {
- p->transform(p, (const float *)in, (float *)out);
+ return mmap(NULL, length, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_SHARED, -1, 0);
+#endif
}
-void ffts_free(ffts_plan_t *p) {
- p->destroy(p);
+static FFTS_INLINE void ffts_vmem_free(void *addr, size_t length)
+{
+#ifdef _WIN32
+ (void) length;
+ VirtualFree(addr, 0, MEM_RELEASE);
+#else
+ munmap(addr, length);
+#endif
}
-void ffts_free_1d(ffts_plan_t *p) {
-
- size_t i;
-
- if(p->ws) {
- FFTS_FREE(p->ws);
- }
- if(p->is) free(p->is);
- if(p->ws_is) free(p->ws_is);
- if(p->offsets) free(p->offsets);
- //free(p->transforms);
- if(p->transforms) free(p->transforms);
-
- if(p->transform_base) {
- if (mprotect(p->transform_base, p->transform_size, PROT_READ | PROT_WRITE)) {
- perror("Couldn't mprotect");
- exit(errno);
- }
- munmap(p->transform_base, p->transform_size);
- //free(p->transform_base);
- }
- free(p);
+FFTS_API void
+ffts_execute(ffts_plan_t *p, const void *in, void *out)
+{
+ /* TODO: Define NEEDS_ALIGNED properly instead */
+#if defined(HAVE_SSE) || defined(HAVE_NEON)
+ if (((uintptr_t) in % 16) != 0) {
+ LOG("ffts_execute: input buffer needs to be aligned to a 128bit boundary\n");
+ }
+
+ if (((uintptr_t) out % 16) != 0) {
+ LOG("ffts_execute: output buffer needs to be aligned to a 128bit boundary\n");
+ }
+#endif
+
+ p->transform(p, (const float*) in, (float*) out);
}
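
On the caller side, the checks above translate into allocating both buffers 16-byte aligned. A minimal usage sketch; C11 aligned_alloc is an arbitrary choice here, and any 16-byte-aligned allocator (or FFTS_MALLOC) works as well:

    #include <stdlib.h>
    #include "ffts.h"

    void transform_example(void)
    {
        size_t N = 64;                                         /* power of two */
        float *in  = aligned_alloc(16, 2 * N * sizeof(float)); /* interleaved re/im */
        float *out = aligned_alloc(16, 2 * N * sizeof(float));
        ffts_plan_t *p = ffts_init_1d(N, -1);                  /* -1: forward */

        if (p && in && out) {
            /* ... fill in[0 .. 2N-1] ... */
            ffts_execute(p, in, out);
        }

        if (p) {
            ffts_free(p);
        }
        free(in);
        free(out);
    }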
-ffts_plan_t *ffts_init_1d(size_t N, int sign) {
- ffts_plan_t *p = malloc(sizeof(ffts_plan_t));
- size_t leafN = 8;
- size_t i;
-
-#ifdef __arm__
-//#ifdef HAVE_NEON
- V MULI_SIGN;
-
- if(sign < 0) MULI_SIGN = VLIT4(-0.0f, 0.0f, -0.0f, 0.0f);
- else MULI_SIGN = VLIT4(0.0f, -0.0f, 0.0f, -0.0f);
-//#endif
+FFTS_API void
+ffts_free(ffts_plan_t *p)
+{
+ if (p) {
+ p->destroy(p);
+ }
+}
+
+void ffts_free_1d(ffts_plan_t *p)
+{
+#if !defined(DYNAMIC_DISABLED)
+ if (p->transform_base) {
+ ffts_deny_execute(p->transform_base, p->transform_size);
+ ffts_vmem_free(p->transform_base, p->transform_size);
+ }
+#endif
+
+ if (p->ws_is) {
+ free(p->ws_is);
+ }
+
+ if (p->ws) {
+ FFTS_FREE(p->ws);
+ }
+
+ if (p->is) {
+ free(p->is);
+ }
+
+ if (p->offsets) {
+ free(p->offsets);
+ }
+
+ free(p);
+}
+
+static int
+ffts_generate_luts(ffts_plan_t *p, size_t N, size_t leaf_N, int sign)
+{
+ V4SF MULI_SIGN;
+ size_t n_luts;
+ ffts_cpx_32f *w;
+ ffts_cpx_32f *tmp;
+ size_t i, j, m, n;
+ int stride;
+
+ if (sign < 0) {
+ MULI_SIGN = V4SF_LIT4(-0.0f, 0.0f, -0.0f, 0.0f);
+ } else {
+ MULI_SIGN = V4SF_LIT4(0.0f, -0.0f, 0.0f, -0.0f);
+ }
+
+ /* LUTS */
+ n_luts = ffts_ctzl(N / leaf_N);
+ if (n_luts >= 32) {
+ n_luts = 0;
+ }
+
+ if (n_luts) {
+ size_t lut_size;
+
+#if defined(__arm__) && !defined(HAVE_NEON)
+ lut_size = leaf_N * (((1 << n_luts) - 2) * 3 + 1) * sizeof(ffts_cpx_32f) / 2;
#else
- V MULI_SIGN;
-
- if(sign < 0) MULI_SIGN = VLIT4(-0.0f, 0.0f, -0.0f, 0.0f);
- else MULI_SIGN = VLIT4(0.0f, -0.0f, 0.0f, -0.0f);
+ lut_size = leaf_N * (((1 << n_luts) - 2) * 3 + 1) * sizeof(ffts_cpx_32f);
#endif
- p->transform = NULL;
- p->transform_base = NULL;
- p->transforms = NULL;
- p->is = NULL;
- p->ws_is = NULL;
- p->ws = NULL;
- p->offsets = NULL;
- p->destroy = ffts_free_1d;
-
- if(N >= 32) {
- ffts_init_offsets(p, N, leafN);
-#ifdef __arm__
+ p->ws = FFTS_MALLOC(lut_size, 32);
+ if (!p->ws) {
+ goto cleanup;
+ }
+
+ p->ws_is = (size_t*) malloc(n_luts * sizeof(*p->ws_is));
+ if (!p->ws_is) {
+ goto cleanup;
+ }
+ }
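+
+ /*
+  * Worked example of the lut_size computation above: for N = 64 and
+  * leaf_N = 8, n_luts = ctz(64/8) = 3, so the generic branch allocates
+  * leaf_N * (((1 << 3) - 2) * 3 + 1) = 152 ffts_cpx_32f entries,
+  * i.e. 1216 bytes at 8 bytes apiece.
+  */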
+
+ w = p->ws;
+ n = leaf_N * 2;
+
#ifdef HAVE_NEON
- ffts_init_is(p, N, leafN, 1);
+ V4SF neg = (sign < 0) ? V4SF_LIT4(0.0f, 0.0f, 0.0f, 0.0f) : V4SF_LIT4(-0.0f, -0.0f, -0.0f, -0.0f);
+#endif
+
+ /* calculate factors */
+ m = leaf_N << (n_luts - 2);
+ tmp = FFTS_MALLOC(m * sizeof(ffts_cpx_32f), 32);
+ if (!tmp) {
+ goto cleanup;
+ }
+
+ ffts_generate_cosine_sine_pow2_32f(tmp, m);
+
+ /* generate lookup tables */
+ stride = 1 << (n_luts - 1);
+ for (i = 0; i < n_luts; i++) {
+ p->ws_is[i] = w - (ffts_cpx_32f*) p->ws;
+
+ if (!i) {
+ ffts_cpx_32f *w0 = FFTS_MALLOC(n/4 * sizeof(ffts_cpx_32f), 32);
+ float *fw0 = (float*) w0;
+ float *fw = (float*) w;
+
+ for (j = 0; j < n/4; j++) {
+ w0[j][0] = tmp[j * stride][0];
+ w0[j][1] = tmp[j * stride][1];
+ }
+
+#if defined(__arm__)
+#ifdef HAVE_NEON
+ for (j = 0; j < n/4; j += 4) {
+ V4SF2 temp0 = V4SF2_LD(fw0 + j*2);
+ temp0.val[1] = V4SF_XOR(temp0.val[1], neg);
+ V4SF2_STORE_SPR(fw + j*2, temp0);
+ }
#else
- ffts_init_is(p, N, leafN, 1);
+ for (j = 0; j < n/4; j++) {
+ fw[j*2+0] = fw0[j*2+0];
+ fw[j*2+1] = (sign < 0) ? fw0[j*2+1] : -fw0[j*2+1];
+ }
#endif
+ w += n/4;
#else
- ffts_init_is(p, N, leafN, 1);
+ for (j = 0; j < n/4; j += 2) {
+ V4SF re, im, temp0;
+ temp0 = V4SF_LD(fw0 + j*2);
+ re = V4SF_DUPLICATE_RE(temp0);
+ im = V4SF_DUPLICATE_IM(temp0);
+ im = V4SF_XOR(im, MULI_SIGN);
+ V4SF_ST(fw + j*4 + 0, re);
+ V4SF_ST(fw + j*4 + 4, im);
+ }
+
+ w += n/4 * 2;
#endif
-
- p->i0 = N/leafN/3+1;
- p->i1 = N/leafN/3;
- if((N/leafN) % 3 > 1) p->i1++;
- p->i2 = N/leafN/3;
-
- #ifdef __arm__
- #ifdef HAVE_NEON
- p->i0/=2;
- p->i1/=2;
- #endif
- #else
- p->i0/=2;
- p->i1/=2;
- #endif
-
- }else{
- p->transforms = malloc(2 * sizeof(transform_index_t));
- p->transforms[0] = 0;
- p->transforms[1] = 1;
- if(N == 2) p->transform = &firstpass_2;
- else if(N == 4 && sign == -1) p->transform = &firstpass_4_f;
- else if(N == 4 && sign == 1) p->transform = &firstpass_4_b;
- else if(N == 8 && sign == -1) p->transform = &firstpass_8_f;
- else if(N == 8 && sign == 1) p->transform = &firstpass_8_b;
- else if(N == 16 && sign == -1) p->transform = &firstpass_16_f;
- else if(N == 16 && sign == 1) p->transform = &firstpass_16_b;
-
- p->is = NULL;
- p->offsets = NULL;
- }
-
- int hardcoded = 0;
-
- /* LUTS */
- size_t n_luts = __builtin_ctzl(N/leafN);
- if(N < 32) { n_luts = __builtin_ctzl(N/4); hardcoded = 1; }
-
- if(n_luts >= 32) n_luts = 0;
-
-// fprintf(stderr, "n_luts = %zu\n", n_luts);
-
- cdata_t *w;
-
- int n = leafN*2;
- if(hardcoded) n = 8;
-
- size_t lut_size = 0;
-
- for(i=0;i<n_luts;i++) {
- if(!i || hardcoded) {
- #ifdef __arm__
- if(N <= 32) lut_size += n/4 * 2 * sizeof(cdata_t);
- else lut_size += n/4 * sizeof(cdata_t);
- #else
- lut_size += n/4 * 2 * sizeof(cdata_t);
- #endif
- n *= 2;
- } else {
- #ifdef __arm__
- lut_size += n/8 * 3 * sizeof(cdata_t);
- #else
- lut_size += n/8 * 3 * 2 * sizeof(cdata_t);
- #endif
- }
- n *= 2;
- }
-
-// lut_size *= 16;
-
- // fprintf(stderr, "lut size = %zu\n", lut_size);
- if(n_luts) {
- p->ws = FFTS_MALLOC(lut_size,32);
- p->ws_is = malloc(n_luts * sizeof(size_t));
- }else{
- p->ws = NULL;
- p->ws_is = NULL;
- }
- w = p->ws;
-
- n = leafN*2;
- if(hardcoded) n = 8;
-
- #ifdef HAVE_NEON
- V neg = (sign < 0) ? VLIT4(0.0f, 0.0f, 0.0f, 0.0f) : VLIT4(-0.0f, -0.0f, -0.0f, -0.0f);
- #endif
-
- for(i=0;i<n_luts;i++) {
- p->ws_is[i] = w - (cdata_t *)p->ws;
- //fprintf(stderr, "LUT[%zu] = %d @ %08x - %zu\n", i, n, w, p->ws_is[i]);
-
- if(!i || hardcoded) {
- cdata_t *w0 = FFTS_MALLOC(n/4 * sizeof(cdata_t), 32);
-
- size_t j;
- for(j=0;j<n/4;j++) {
- w0[j][0] = W_re(n,j);
- w0[j][1] = W_im(n,j);
- }
-
-
- float *fw0 = (float *)w0;
- #ifdef __arm__
- if(N < 32) {
- //w = FFTS_MALLOC(n/4 * 2 * sizeof(cdata_t), 32);
- float *fw = (float *)w;
- V temp0, temp1, temp2;
- for(j=0;j<n/4;j+=2) {
- // #ifdef HAVE_NEON
- temp0 = VLD(fw0 + j*2);
- V re, im;
- re = VDUPRE(temp0);
- im = VDUPIM(temp0);
- #ifdef HAVE_NEON
- im = VXOR(im, MULI_SIGN);
- //im = IMULI(sign>0, im);
- #else
- im = MULI(sign>0, im);
- #endif
- VST(fw + j*4 , re);
- VST(fw + j*4+4, im);
- // #endif
- }
- w += n/4 * 2;
- }else{
- //w = FFTS_MALLOC(n/4 * sizeof(cdata_t), 32);
- float *fw = (float *)w;
- #ifdef HAVE_NEON
- VS temp0, temp1, temp2;
- for(j=0;j<n/4;j+=4) {
- temp0 = VLD2(fw0 + j*2);
- temp0.val[1] = VXOR(temp0.val[1], neg);
- STORESPR(fw + j*2, temp0);
- }
- #else
- for(j=0;j<n/4;j+=1) {
- fw[j*2] = fw0[j*2];
- fw[j*2+1] = (sign < 0) ? fw0[j*2+1] : -fw0[j*2+1];
- }
- #endif
- w += n/4;
- }
- #else
- //w = FFTS_MALLOC(n/4 * 2 * sizeof(cdata_t), 32);
- float *fw = (float *)w;
- V temp0, temp1, temp2;
- for(j=0;j<n/4;j+=2) {
- temp0 = VLD(fw0 + j*2);
- V re, im;
- re = VDUPRE(temp0);
- im = VDUPIM(temp0);
- im = VXOR(im, MULI_SIGN);
- VST(fw + j*4 , re);
- VST(fw + j*4+4, im);
- }
- w += n/4 * 2;
- #endif
-
- FFTS_FREE(w0);
- }else{
-
- cdata_t *w0 = FFTS_MALLOC(n/8 * sizeof(cdata_t), 32);
- cdata_t *w1 = FFTS_MALLOC(n/8 * sizeof(cdata_t), 32);
- cdata_t *w2 = FFTS_MALLOC(n/8 * sizeof(cdata_t), 32);
-
- size_t j;
- for(j=0;j<n/8;j++) {
- w0[j][0] = W_re(n,j*2);
- w0[j][1] = W_im(n,j*2);
- w1[j][0] = W_re(n,j);
- w1[j][1] = W_im(n,j);
- w2[j][0] = W_re(n,j + (n/8));
- w2[j][1] = W_im(n,j + (n/8));
-
- }
-
- float *fw0 = (float *)w0;
- float *fw1 = (float *)w1;
- float *fw2 = (float *)w2;
- #ifdef __arm__
- //w = FFTS_MALLOC(n/8 * 3 * sizeof(cdata_t), 32);
- float *fw = (float *)w;
- #ifdef HAVE_NEON
- VS temp0, temp1, temp2;
- for(j=0;j<n/8;j+=4) {
- temp0 = VLD2(fw0 + j*2);
- temp0.val[1] = VXOR(temp0.val[1], neg);
- STORESPR(fw + j*2*3, temp0);
- temp1 = VLD2(fw1 + j*2);
- temp1.val[1] = VXOR(temp1.val[1], neg);
- STORESPR(fw + j*2*3 + 8, temp1);
- temp2 = VLD2(fw2 + j*2);
- temp2.val[1] = VXOR(temp2.val[1], neg);
- STORESPR(fw + j*2*3 + 16, temp2);
- }
- #else
- for(j=0;j<n/8;j+=1) {
- fw[j*6] = fw0[j*2];
- fw[j*6+1] = (sign < 0) ? fw0[j*2+1] : -fw0[j*2+1];
- fw[j*6+2] = fw1[j*2+0];
- fw[j*6+3] = (sign < 0) ? fw1[j*2+1] : -fw1[j*2+1];
- fw[j*6+4] = fw2[j*2+0];
- fw[j*6+5] = (sign < 0) ? fw2[j*2+1] : -fw2[j*2+1];
- }
- #endif
- w += n/8 * 3;
- #else
- //w = FFTS_MALLOC(n/8 * 3 * 2 * sizeof(cdata_t), 32);
- float *fw = (float *)w;
- V temp0, temp1, temp2, re, im;
- for(j=0;j<n/8;j+=2) {
- temp0 = VLD(fw0 + j*2);
- re = VDUPRE(temp0);
- im = VDUPIM(temp0);
- im = VXOR(im, MULI_SIGN);
- VST(fw + j*2*6 , re);
- VST(fw + j*2*6+4, im);
-
- temp1 = VLD(fw1 + j*2);
- re = VDUPRE(temp1);
- im = VDUPIM(temp1);
- im = VXOR(im, MULI_SIGN);
- VST(fw + j*2*6+8 , re);
- VST(fw + j*2*6+12, im);
-
- temp2 = VLD(fw2 + j*2);
- re = VDUPRE(temp2);
- im = VDUPIM(temp2);
- im = VXOR(im, MULI_SIGN);
- VST(fw + j*2*6+16, re);
- VST(fw + j*2*6+20, im);
- }
- w += n/8 * 3 * 2;
- #endif
-
- FFTS_FREE(w0);
- FFTS_FREE(w1);
- FFTS_FREE(w2);
- }
- ///p->ws[i] = w;
-
- n *= 2;
- }
-
- float *tmp = (float *)p->ws;
-
- if(sign < 0) {
- p->oe_ws = (void *)(&w_data[4]);
- p->ee_ws = (void *)(w_data);
- p->eo_ws = (void *)(&w_data[4]);
- }else{
- p->oe_ws = (void *)(w_data + 12);
- p->ee_ws = (void *)(w_data + 8);
- p->eo_ws = (void *)(w_data + 12);
- }
-
- p->N = N;
- p->lastlut = w;
- p->n_luts = n_luts;
-#ifdef DYNAMIC_DISABLED
- if(sign < 0) {
- if(N >= 32) p->transform = ffts_static_transform_f;
- }else{
- if(N >= 32) p->transform = ffts_static_transform_i;
- }
+ FFTS_FREE(w0);
+ } else {
+ ffts_cpx_32f *w0 = (ffts_cpx_32f*) FFTS_MALLOC(n/8 * sizeof(ffts_cpx_32f), 32);
+ ffts_cpx_32f *w1 = (ffts_cpx_32f*) FFTS_MALLOC(n/8 * sizeof(ffts_cpx_32f), 32);
+ ffts_cpx_32f *w2 = (ffts_cpx_32f*) FFTS_MALLOC(n/8 * sizeof(ffts_cpx_32f), 32);
+
+ float *fw0 = (float*) w0;
+ float *fw1 = (float*) w1;
+ float *fw2 = (float*) w2;
+
+ float *fw = (float *)w;
+
+ for (j = 0; j < n/8; j++) {
+ w0[j][0] = tmp[2 * j * stride][0];
+ w0[j][1] = tmp[2 * j * stride][1];
+
+ w1[j][0] = tmp[j * stride][0];
+ w1[j][1] = tmp[j * stride][1];
+
+ w2[j][0] = tmp[(j + (n/8)) * stride][0];
+ w2[j][1] = tmp[(j + (n/8)) * stride][1];
+ }
+
+#if defined(__arm__)
+#ifdef HAVE_NEON
+ for (j = 0; j < n/8; j += 4) {
+ V4SF2 temp0, temp1, temp2;
+
+ temp0 = V4SF2_LD(fw0 + j*2);
+ temp0.val[1] = V4SF_XOR(temp0.val[1], neg);
+ V4SF2_STORE_SPR(fw + j*2*3, temp0);
+
+ temp1 = V4SF2_LD(fw1 + j*2);
+ temp1.val[1] = V4SF_XOR(temp1.val[1], neg);
+ V4SF2_STORE_SPR(fw + j*2*3 + 8, temp1);
+
+ temp2 = V4SF2_LD(fw2 + j*2);
+ temp2.val[1] = V4SF_XOR(temp2.val[1], neg);
+ V4SF2_STORE_SPR(fw + j*2*3 + 16, temp2);
+ }
#else
- if(N>=32) ffts_generate_func_code(p, N, leafN, sign);
+ for (j = 0; j < n/8; j++) {
+ fw[j*6+0] = fw0[j*2+0];
+ fw[j*6+1] = (sign < 0) ? fw0[j*2+1] : -fw0[j*2+1];
+ fw[j*6+2] = fw1[j*2+0];
+ fw[j*6+3] = (sign < 0) ? fw1[j*2+1] : -fw1[j*2+1];
+ fw[j*6+4] = fw2[j*2+0];
+ fw[j*6+5] = (sign < 0) ? fw2[j*2+1] : -fw2[j*2+1];
+ }
#endif
+ w += n/8 * 3;
+#else
+ for (j = 0; j < n/8; j += 2) {
+ V4SF temp0, temp1, temp2, re, im;
+
+ temp0 = V4SF_LD(fw0 + j*2);
+ re = V4SF_DUPLICATE_RE(temp0);
+ im = V4SF_DUPLICATE_IM(temp0);
+ im = V4SF_XOR(im, MULI_SIGN);
+ V4SF_ST(fw + j*2*6+0, re);
+ V4SF_ST(fw + j*2*6+4, im);
+
+ temp1 = V4SF_LD(fw1 + j*2);
+ re = V4SF_DUPLICATE_RE(temp1);
+ im = V4SF_DUPLICATE_IM(temp1);
+ im = V4SF_XOR(im, MULI_SIGN);
+ V4SF_ST(fw + j*2*6+8 , re);
+ V4SF_ST(fw + j*2*6+12, im);
+
+ temp2 = V4SF_LD(fw2 + j*2);
+ re = V4SF_DUPLICATE_RE(temp2);
+ im = V4SF_DUPLICATE_IM(temp2);
+ im = V4SF_XOR(im, MULI_SIGN);
+ V4SF_ST(fw + j*2*6+16, re);
+ V4SF_ST(fw + j*2*6+20, im);
+ }
+
+ w += n/8 * 3 * 2;
+#endif
+
+ FFTS_FREE(w0);
+ FFTS_FREE(w1);
+ FFTS_FREE(w2);
+ }
+
+ n *= 2;
+ stride >>= 1;
+ }
+
+#if defined(HAVE_NEON)
+ if (sign < 0) {
+ p->oe_ws = (void*)(w_data + 4);
+ p->ee_ws = (void*)(w_data);
+ p->eo_ws = (void*)(w_data + 4);
+ } else {
+ p->oe_ws = (void*)(w_data + 12);
+ p->ee_ws = (void*)(w_data + 8);
+ p->eo_ws = (void*)(w_data + 12);
+ }
+#endif
+
+ FFTS_FREE(tmp);
- return p;
+ p->lastlut = w;
+ p->n_luts = n_luts;
+ return 0;
+
+cleanup:
+ return -1;
}
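
A note on the loop above: the cosine/sine table tmp is generated once at the finest granularity, m = leaf_N << (n_luts - 2) entries, and every LUT level re-samples it, with n doubling while stride halves, rather than recomputing trigonometry per level. For N = 64 (n_luts = 3, m = 16, initial stride = 4) the first level reads entries 0, 4, 8 and 12.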
+FFTS_API ffts_plan_t*
+ffts_init_1d(size_t N, int sign)
+{
+ const size_t leaf_N = 8;
+ ffts_plan_t *p;
+
+ if (N < 2 || (N & (N - 1)) != 0) {
+ LOG("FFT size must be a power of two\n");
+ return NULL;
+ }
+
+ p = calloc(1, sizeof(*p));
+ if (!p) {
+ return NULL;
+ }
+
+ p->destroy = ffts_free_1d;
+ p->N = N;
+
+ if (N >= 32) {
+ /* generate lookup tables */
+ if (ffts_generate_luts(p, N, leaf_N, sign)) {
+ goto cleanup;
+ }
+
+ p->offsets = ffts_init_offsets(N, leaf_N);
+ if (!p->offsets) {
+ goto cleanup;
+ }
+
+ p->is = ffts_init_is(N, leaf_N, 1);
+ if (!p->is) {
+ goto cleanup;
+ }
+
+ p->i0 = N/leaf_N/3 + 1;
+ p->i1 = p->i2 = N/leaf_N/3;
+ if ((N/leaf_N) % 3 > 1) {
+ p->i1++;
+ }
+
+#if !defined(HAVE_VFP) || defined(DYNAMIC_DISABLED)
+ p->i0 /= 2;
+ p->i1 /= 2;
+#endif
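+
+ /*
+  * Worked through for N = 64: N/leaf_N = 8 leaves, so i0 = 8/3 + 1 = 3
+  * and i1 = i2 = 2; since 8 % 3 = 2 > 1, i1 is bumped to 3. The halving
+  * above (these code paths appear to consume two leaves per iteration)
+  * leaves i0 = i1 = 1.
+  */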
+
+#ifdef DYNAMIC_DISABLED
+ if (sign < 0) {
+ p->transform = ffts_static_transform_f_32f;
+ } else {
+ p->transform = ffts_static_transform_i_32f;
+ }
+#else
+ /* determine the size of the generated-code buffer */
+#if defined(__arm__)
+ if (N < 8192) {
+ p->transform_size = 8192;
+ } else {
+ p->transform_size = N;
+ }
+#else
+ if (N < 2048) {
+ p->transform_size = 16384;
+ } else {
+ p->transform_size = 16384 + 2*N/8 * ffts_ctzl(N);
+ }
+#endif
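+
+ /*
+  * For example, N = 65536 reserves 16384 + 2*65536/8 * ctz(65536)
+  * = 16384 + 16384 * 16 = 278528 bytes for the generated code, while
+  * any N < 2048 gets the flat 16 KiB.
+  */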
+
+ /* allocate code/function buffer */
+ p->transform_base = ffts_vmem_alloc(p->transform_size);
+ if (!p->transform_base) {
+ goto cleanup;
+ }
+
+ /* generate code */
+ p->transform = ffts_generate_func_code(p, N, leaf_N, sign);
+ if (!p->transform) {
+ goto cleanup;
+ }
+
+ /* enable execution with read access for the block */
+ if (ffts_allow_execute(p->transform_base, p->transform_size)) {
+ goto cleanup;
+ }
+
+ /* flush from the instruction cache */
+ if (ffts_flush_instruction_cache(p->transform_base, p->transform_size)) {
+ goto cleanup;
+ }
+#endif
+ } else {
+ switch (N) {
+ case 2:
+ p->transform = &ffts_small_2_32f;
+ break;
+ case 4:
+ if (sign == -1) {
+ p->transform = &ffts_small_forward4_32f;
+ } else if (sign == 1) {
+ p->transform = &ffts_small_backward4_32f;
+ }
+ break;
+ case 8:
+ if (sign == -1) {
+ p->transform = &ffts_small_forward8_32f;
+ } else if (sign == 1) {
+ p->transform = &ffts_small_backward8_32f;
+ }
+ break;
+ case 16:
+ default:
+ if (sign == -1) {
+ p->transform = &ffts_small_forward16_32f;
+ } else {
+ p->transform = &ffts_small_backward16_32f;
+ }
+ break;
+ }
+ }
+
+ return p;
+
+cleanup:
+ ffts_free_1d(p);
+ return NULL;
+}
\ No newline at end of file
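
/*
 * A minimal usage sketch for the rewritten ffts_init_1d() above, assuming
 * the public API declared in ffts.h (ffts_execute/ffts_free) and
 * sufficiently aligned, interleaved complex float buffers
 * (re, im, re, im, ...). sketch_forward_64 is an illustrative name only.
 */
#include "ffts.h"

int sketch_forward_64(const float *in /* 2*64 floats */, float *out)
{
    ffts_plan_t *p = ffts_init_1d(64, -1);  /* sign == -1: forward */
    if (!p) {
        return -1;                          /* N not a power of two, or OOM */
    }
    ffts_execute(p, in, out);
    ffts_free(p);
    return 0;
}
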
diff --git a/lib/ffts/src/ffts.h b/lib/ffts/src/ffts.h
deleted file mode 100644
index 4409029..0000000
--- a/lib/ffts/src/ffts.h
+++ /dev/null
@@ -1,177 +0,0 @@
-/*
-
- This file is part of FFTS -- The Fastest Fourier Transform in the South
-
- Copyright (c) 2012, Anthony M. Blake <amb@anthonix.com>
- Copyright (c) 2012, The University of Waikato
-
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions are met:
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in the
- documentation and/or other materials provided with the distribution.
- * Neither the name of the organization nor the
- names of its contributors may be used to endorse or promote products
- derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
- ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- DISCLAIMED. IN NO EVENT SHALL ANTHONY M. BLAKE BE LIABLE FOR ANY
- DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-*/
-#ifndef __CP_SSE_H__
-#define __CP_SSE_H__
-
-#include "config.h"
-
-#include <stdio.h>
-#include <stdlib.h>
-#include <math.h>
-#include <stddef.h>
-#include <stdint.h>
-//#include <stdalign.h>
-
-//#include "codegen.h"
-#include "types.h"
-
-#define PI 3.1415926535897932384626433832795028841971693993751058209
-
-static const __attribute__ ((aligned(64))) float w_data[16] = {
- 0.70710678118654757273731092936941, 0.70710678118654746171500846685376,
- -0.70710678118654757273731092936941, -0.70710678118654746171500846685376,
- 1.0f, 0.70710678118654757273731092936941f,
- -0.0f, -0.70710678118654746171500846685376,
- 0.70710678118654757273731092936941, 0.70710678118654746171500846685376,
- 0.70710678118654757273731092936941, 0.70710678118654746171500846685376,
- 1.0f, 0.70710678118654757273731092936941f,
- 0.0f, 0.70710678118654746171500846685376
-};
-
-__INLINE float W_re(float N, float k) { return cos(-2.0f * PI * k / N); }
-__INLINE float W_im(float N, float k) { return sin(-2.0f * PI * k / N); }
-
-typedef size_t transform_index_t;
-
-//typedef void (*transform_func_t)(float *data, size_t N, float *LUT);
-typedef void (*transform_func_t)(float *data, size_t N, float *LUT);
-
-typedef struct _ffts_plan_t ffts_plan_t;
-
-/**
- * Contains all the Information need to perform FFT
- *
- *
- * DO NOT CHANGE THE ORDER OF MEMBERS
- * ASSEMBLY CODE USES HARD CODED OFFSETS TO REFERENCE
- * SOME OF THESE VARIABES!!
- */
-struct _ffts_plan_t {
-
- /**
- *
- */
- ptrdiff_t *offsets;
-#ifdef DYNAMIC_DISABLED
- /**
- * Twiddle factors
- */
- void *ws;
- /**
- * ee - 2 size x size8
- * oo - 2 x size4 in parallel
- * oe -
- */
- void *oe_ws, *eo_ws, *ee_ws;
-#else
- void __attribute__((aligned(32))) *ws;
- void __attribute__((aligned(32))) *oe_ws, *eo_ws, *ee_ws;
-#endif
- /**
- * Pointer into an array of precomputed indexes for the input data array
- */
- ptrdiff_t *is;
-
- /**
- * Twiddle Factor Indexes
- */
- size_t *ws_is;
-
- /**
- * Size of the loops for the base cases
- */
- size_t i0, i1, n_luts;
-
- /**
- * Size fo the Transform
- */
- size_t N;
- void *lastlut;
- /**
- * Used in multidimensional Code ??
- */
- transform_index_t *transforms;
- //transform_func_t transform;
-
- /**
- * Pointer to the dynamically generated function
- * that will execute the FFT
- */
- void (*transform)(ffts_plan_t * , const void * , void * );
-
- /**
- * Pointer to the base memory address of
- * of the transform function
- */
- void *transform_base;
-
- /**
- * Size of the memory block contain the
- * generated code
- */
- size_t transform_size;
-
- /**
- * Points to the cosnant variables used by
- * the Assembly Code
- */
- void *constants;
-
- // multi-dimensional stuff:
- struct _ffts_plan_t **plans;
- int rank;
- size_t *Ns, *Ms;
- void *buf;
-
- void *transpose_buf;
-
- /**
- * Pointer to the destroy function
- * to clean up the plan after use
- * (differs for real and multi dimension transforms
- */
- void (*destroy)(ffts_plan_t *);
-
- /**
- * Coefficiants for the real valued transforms
- */
- float *A, *B;
-
- size_t i2;
-};
-
-
-void ffts_free(ffts_plan_t *);
-ffts_plan_t *ffts_init_1d(size_t N, int sign);
-void ffts_execute(ffts_plan_t *, const void *, void *);
-#endif
diff --git a/lib/ffts/src/ffts_attributes.h b/lib/ffts/src/ffts_attributes.h
new file mode 100644
index 0000000..bdfd616
--- /dev/null
+++ b/lib/ffts/src/ffts_attributes.h
@@ -0,0 +1,111 @@
+/*
+
+ This file is part of FFTS -- The Fastest Fourier Transform in the South
+
+ Copyright (c) 2012, Anthony M. Blake <amb@anthonix.com>
+ Copyright (c) 2012, The University of Waikato
+
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+ * Neither the name of the organization nor the
+ names of its contributors may be used to endorse or promote products
+ derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL ANTHONY M. BLAKE BE LIABLE FOR ANY
+ DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*/
+
+#ifndef FFTS_ATTRIBUTES_H
+#define FFTS_ATTRIBUTES_H
+
+#if defined (_MSC_VER) && (_MSC_VER >= 1020)
+#pragma once
+#endif
+
+/* Macro definitions for various function/variable attributes */
+#ifdef __GNUC__
+#define GCC_VERSION_AT_LEAST(x,y) \
+ (__GNUC__ > x || __GNUC__ == x && __GNUC_MINOR__ >= y)
+#else
+#define GCC_VERSION_AT_LEAST(x,y) 0
+#endif
+
+#ifdef __GNUC__
+#define FFTS_ALIGN(x) __attribute__((aligned(x)))
+#elif defined(_MSC_VER)
+#define FFTS_ALIGN(x) __declspec(align(x))
+#else
+#define FFTS_ALIGN(x)
+#endif
+
+#if GCC_VERSION_AT_LEAST(3,1)
+#define FFTS_ALWAYS_INLINE __attribute__((always_inline)) inline
+#elif defined(_MSC_VER)
+#define FFTS_ALWAYS_INLINE __forceinline
+#else
+#define FFTS_ALWAYS_INLINE inline
+#endif
+
+#if defined(_MSC_VER)
+#define FFTS_INLINE __inline
+#else
+#define FFTS_INLINE inline
+#endif
+
+#if defined(__GNUC__)
+#define FFTS_RESTRICT __restrict
+#elif defined(_MSC_VER)
+#define FFTS_RESTRICT __restrict
+#else
+#define FFTS_RESTRICT
+#endif
+
+#if GCC_VERSION_AT_LEAST(4,5)
+#define FFTS_ASSUME(cond) do { if (!(cond)) __builtin_unreachable(); } while (0)
+#elif defined(_MSC_VER)
+#define FFTS_ASSUME(cond) __assume(cond)
+#else
+#define FFTS_ASSUME(cond)
+#endif
+
+#if GCC_VERSION_AT_LEAST(4,7)
+#define FFTS_ASSUME_ALIGNED_16(x) __builtin_assume_aligned(x, 16)
+#else
+#define FFTS_ASSUME_ALIGNED_16(x) x
+#endif
+
+#if GCC_VERSION_AT_LEAST(4,7)
+#define FFTS_ASSUME_ALIGNED_32(x) __builtin_assume_aligned(x, 32)
+#else
+#define FFTS_ASSUME_ALIGNED_32(x) x
+#endif
+
+#if defined(__GNUC__)
+#define FFTS_LIKELY(cond) __builtin_expect(!!(cond), 1)
+#else
+#define FFTS_LIKELY(cond) cond
+#endif
+
+#if defined(__GNUC__)
+#define FFTS_UNLIKELY(cond) __builtin_expect(!!(cond), 0)
+#else
+#define FFTS_UNLIKELY(cond) cond
+#endif
+
+#endif /* FFTS_ATTRIBUTES_H */
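
/*
 * A small sketch of how the attribute macros above combine in practice:
 * align the storage with FFTS_ALIGN, then restate the guarantee to the
 * optimizer with FFTS_ASSUME_ALIGNED_32 at the point of use. Names and
 * sizes here are illustrative only.
 */
#include "ffts_attributes.h"

static FFTS_ALIGN(32) float sketch_buf[64];

static FFTS_ALWAYS_INLINE float sketch_sum64(void)
{
    const float *FFTS_RESTRICT b =
        (const float*) FFTS_ASSUME_ALIGNED_32(sketch_buf);
    float acc = 0.0f;
    int i;

    for (i = 0; i < 64; i++) {
        acc += b[i];
    }

    return acc;
}
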
diff --git a/lib/ffts/src/ffts_dd.h b/lib/ffts/src/ffts_dd.h
new file mode 100644
index 0000000..f8bbee4
--- /dev/null
+++ b/lib/ffts/src/ffts_dd.h
@@ -0,0 +1,230 @@
+/*
+
+This file is part of FFTS -- The Fastest Fourier Transform in the South
+
+Copyright (c) 2015, Jukka Ojanen <jukka.ojanen@kolumbus.fi>
+
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+* Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+* Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in the
+documentation and/or other materials provided with the distribution.
+* Neither the name of the organization nor the
+names of its contributors may be used to endorse or promote products
+derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL ANTHONY M. BLAKE BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*/
+
+#ifndef FFTS_DD_H
+#define FFTS_DD_H
+
+#if defined (_MSC_VER) && (_MSC_VER >= 1020)
+#pragma once
+#endif
+
+#include "ffts_attributes.h"
+
+#if HAVE_SSE2
+#include <emmintrin.h>
+#endif
+
+/* double-double number */
+struct ffts_dd_t
+{
+ double hi;
+ double lo;
+};
+
+#if HAVE_SSE2
+/* double-double vector */
+struct ffts_dd2_t {
+ __m128d hi;
+ __m128d lo;
+};
+#endif
+
+static FFTS_INLINE struct ffts_dd_t
+ffts_dd_add_dd_unnormalized(const struct ffts_dd_t a,
+ const struct ffts_dd_t b);
+
+static FFTS_INLINE struct ffts_dd_t
+ffts_dd_mul_dd_unnormalized(const struct ffts_dd_t a,
+ const struct ffts_dd_t b);
+
+static FFTS_INLINE struct ffts_dd_t
+ffts_dd_split(double a);
+
+/* aka quick-two-sum */
+static FFTS_INLINE struct ffts_dd_t
+ffts_dd_add(double a, double b)
+{
+ struct ffts_dd_t dd;
+ dd.hi = a + b;
+ dd.lo = b - (dd.hi - a);
+ return dd;
+}
+
+static FFTS_INLINE struct ffts_dd_t
+ffts_dd_add_dd(const struct ffts_dd_t a,
+ const struct ffts_dd_t b)
+{
+ struct ffts_dd_t t1 = ffts_dd_add_dd_unnormalized(a, b);
+ return ffts_dd_add(t1.hi, t1.lo);
+}
+
+static FFTS_INLINE struct ffts_dd_t
+ffts_dd_add_dd_unnormalized(const struct ffts_dd_t a,
+ const struct ffts_dd_t b)
+{
+ struct ffts_dd_t dd;
+ double e1;
+ dd.hi = a.hi + b.hi;
+ e1 = dd.hi - a.hi;
+ dd.lo = ((a.hi - (dd.hi - e1)) + (b.hi - e1)) + (a.lo + b.lo);
+ return dd;
+}
+
+static FFTS_INLINE struct ffts_dd_t
+ffts_dd_mul(const double a, const double b)
+{
+ struct ffts_dd_t dd;
+ struct ffts_dd_t t1 = ffts_dd_split(a);
+ struct ffts_dd_t t2 = ffts_dd_split(b);
+ dd.hi = a * b;
+ dd.lo = (t1.hi * t2.hi - dd.hi);
+ dd.lo += (t1.hi * t2.lo + t1.lo * t2.hi);
+ dd.lo += t1.lo * t2.lo;
+ return dd;
+}
+
+static FFTS_INLINE struct ffts_dd_t
+ffts_dd_mul_dd(const struct ffts_dd_t a,
+ const struct ffts_dd_t b)
+{
+ struct ffts_dd_t dd = ffts_dd_mul_dd_unnormalized(a, b);
+ return ffts_dd_add(dd.hi, dd.lo);
+}
+
+static FFTS_INLINE struct ffts_dd_t
+ffts_dd_mul_dd_unnormalized(const struct ffts_dd_t a,
+ const struct ffts_dd_t b)
+{
+ struct ffts_dd_t dd = ffts_dd_mul(a.hi, b.hi);
+ dd.lo += (a.hi * b.lo + a.lo * b.hi);
+ return dd;
+}
+
+static FFTS_INLINE struct ffts_dd_t
+ffts_dd_split(double a)
+{
+ /* 2^27+1 = 134217729 */
+ struct ffts_dd_t dd;
+ double t = 134217729.0 * a;
+ dd.hi = t - (t - a);
+ dd.lo = a - dd.hi;
+ return dd;
+}
+
+#if HAVE_SSE2
+static FFTS_INLINE struct ffts_dd2_t
+ffts_dd2_add_dd2_unnormalized(const struct ffts_dd2_t *const FFTS_RESTRICT a,
+ const struct ffts_dd2_t *const FFTS_RESTRICT b);
+
+static FFTS_INLINE struct ffts_dd2_t
+ffts_dd2_mul_dd2_unnormalized(const struct ffts_dd2_t *const FFTS_RESTRICT a,
+ const struct ffts_dd2_t *const FFTS_RESTRICT b);
+
+static FFTS_INLINE struct ffts_dd2_t
+ffts_dd2_split(__m128d a);
+
+static FFTS_INLINE struct ffts_dd2_t
+ffts_dd2_add(__m128d a, __m128d b)
+{
+ struct ffts_dd2_t dd2;
+ dd2.hi = _mm_add_pd(a, b);
+ dd2.lo = _mm_sub_pd(b, _mm_sub_pd(dd2.hi, a));
+ return dd2;
+}
+
+static FFTS_INLINE struct ffts_dd2_t
+ffts_dd2_add_dd2(const struct ffts_dd2_t *const FFTS_RESTRICT a,
+ const struct ffts_dd2_t *const FFTS_RESTRICT b)
+{
+ struct ffts_dd2_t t1 = ffts_dd2_add_dd2_unnormalized(a, b);
+ return ffts_dd2_add(t1.hi, t1.lo);
+}
+
+static FFTS_INLINE struct ffts_dd2_t
+ffts_dd2_add_dd2_unnormalized(const struct ffts_dd2_t *const FFTS_RESTRICT a,
+ const struct ffts_dd2_t *const FFTS_RESTRICT b)
+{
+ struct ffts_dd2_t dd2;
+ __m128d e1;
+ dd2.hi = _mm_add_pd(a->hi, b->hi);
+ e1 = _mm_sub_pd(dd2.hi, a->hi);
+ dd2.lo = _mm_add_pd(_mm_add_pd(_mm_sub_pd(a->hi, _mm_sub_pd(dd2.hi, e1)),
+ _mm_sub_pd(b->hi, e1)), _mm_add_pd(a->lo, b->lo));
+ return dd2;
+}
+
+static FFTS_INLINE struct ffts_dd2_t
+ffts_dd2_mul(const __m128d a, const __m128d b)
+{
+ struct ffts_dd2_t dd2;
+ struct ffts_dd2_t t1 = ffts_dd2_split(a);
+ struct ffts_dd2_t t2 = ffts_dd2_split(b);
+ dd2.hi = _mm_mul_pd(a, b);
+ dd2.lo = _mm_add_pd(_mm_add_pd(_mm_sub_pd(
+ _mm_mul_pd(t1.hi, t2.hi), dd2.hi),
+ _mm_add_pd(_mm_mul_pd(t1.hi, t2.lo),
+ _mm_mul_pd(t1.lo, t2.hi))),
+ _mm_mul_pd(t1.lo, t2.lo));
+ return dd2;
+}
+
+static FFTS_INLINE struct ffts_dd2_t
+ffts_dd2_mul_dd2(const struct ffts_dd2_t *const FFTS_RESTRICT a,
+ const struct ffts_dd2_t *const FFTS_RESTRICT b)
+{
+ struct ffts_dd2_t dd2 = ffts_dd2_mul_dd2_unnormalized(a, b);
+ return ffts_dd2_add(dd2.hi, dd2.lo);
+}
+
+static FFTS_INLINE struct ffts_dd2_t
+ffts_dd2_mul_dd2_unnormalized(const struct ffts_dd2_t *const FFTS_RESTRICT a,
+ const struct ffts_dd2_t *const FFTS_RESTRICT b)
+{
+ struct ffts_dd2_t dd2 = ffts_dd2_mul(a->hi, b->hi);
+ dd2.lo = _mm_add_pd(dd2.lo, _mm_add_pd(
+ _mm_mul_pd(a->hi, b->lo), _mm_mul_pd(a->lo, b->hi)));
+ return dd2;
+}
+
+static FFTS_INLINE struct ffts_dd2_t
+ffts_dd2_split(__m128d a)
+{
+ /* 2^27+1 = 134217729 */
+ struct ffts_dd2_t dd2;
+ __m128d t = _mm_mul_pd(a, _mm_set1_pd(134217729.0));
+ dd2.hi = _mm_sub_pd(t, _mm_sub_pd(t, a));
+ dd2.lo = _mm_sub_pd(a, dd2.hi);
+ return dd2;
+}
+#endif /* HAVE_SSE2 */
+
+#endif /* FFTS_DD_H */
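
/*
 * A minimal sketch of the scalar double-double primitives above: the lo
 * word of ffts_dd_mul() recovers the rounding error of the double product,
 * so hi + lo carries roughly twice the precision of a plain double.
 * main() here is illustrative only and assumes ffts_dd.h is on the
 * include path.
 */
#include <stdio.h>
#include "ffts_dd.h"

int main(void)
{
    struct ffts_dd_t prod = ffts_dd_mul(1.0 / 3.0, 3.0);
    struct ffts_dd_t sum  = ffts_dd_add_dd(prod, ffts_dd_mul(-1.0, 1.0));

    /* sum.hi + sum.lo is the double-double residual of (1/3)*3 - 1 */
    printf("residual: %g + %g\n", sum.hi, sum.lo);
    return 0;
}
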
diff --git a/lib/ffts/src/ffts_internal.h b/lib/ffts/src/ffts_internal.h
new file mode 100644
index 0000000..6b88b35
--- /dev/null
+++ b/lib/ffts/src/ffts_internal.h
@@ -0,0 +1,215 @@
+/*
+
+This file is part of FFTS -- The Fastest Fourier Transform in the South
+
+Copyright (c) 2012, Anthony M. Blake <amb@anthonix.com>
+Copyright (c) 2012, The University of Waikato
+
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+* Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+* Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in the
+documentation and/or other materials provided with the distribution.
+* Neither the name of the organization nor the
+names of its contributors may be used to endorse or promote products
+derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL ANTHONY M. BLAKE BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*/
+
+#ifndef FFTS_INTERNAL_H
+#define FFTS_INTERNAL_H
+
+//#include "config.h"
+#include "ffts_attributes.h"
+#include "types.h"
+
+#ifdef HAVE_MALLOC_H
+#include <malloc.h>
+#endif
+
+#include <stddef.h>
+
+#ifdef HAVE_STDINT_H
+#include <stdint.h>
+#endif
+
+#ifdef HAVE_STDLIB_H
+#include <stdlib.h>
+#endif
+
+#include <stdio.h>
+
+#ifdef ENABLE_LOG
+#ifdef __ANDROID__
+#include <android/log.h>
+#define LOG(s) __android_log_print(ANDROID_LOG_ERROR, "FFTS", s)
+#else
+#define LOG(s) fprintf(stderr, s)
+#endif
+#else
+#define LOG(s)
+#endif
+
+struct _ffts_plan_t;
+typedef void (*transform_func_t)(struct _ffts_plan_t *p, const void *in, void *out);
+
+/**
+ * Contains all the information needed to perform an FFT
+ *
+ * DO NOT CHANGE THE ORDER OF MEMBERS
+ * ASSEMBLY CODE USES HARD-CODED OFFSETS TO REFERENCE
+ * SOME OF THESE VARIABLES!!
+ */
+struct _ffts_plan_t {
+
+ /**
+ * Precomputed output offsets for the leaf transforms
+ */
+ ptrdiff_t *offsets;
+#ifdef DYNAMIC_DISABLED
+ /**
+ * Twiddle factors
+ */
+ void *ws;
+
+ /**
+ * ee - 2 size x size8
+ * oo - 2 x size4 in parallel
+ * oe -
+ */
+ void *oe_ws, *eo_ws, *ee_ws;
+#else
+ void FFTS_ALIGN(32) *ws;
+ void FFTS_ALIGN(32) *oe_ws, *eo_ws, *ee_ws;
+#endif
+
+ /**
+ * Pointer into an array of precomputed indexes for the input data array
+ */
+ ptrdiff_t *is;
+
+ /**
+ * Twiddle Factor Indexes
+ */
+ size_t *ws_is;
+
+ /**
+ * Size of the loops for the base cases
+ */
+ size_t i0, i1, n_luts;
+
+ /**
+ * Size of the transform
+ */
+ size_t N;
+ void *lastlut;
+
+#ifdef __arm__
+ size_t *temporary_fix_as_dynamic_code_assumes_fixed_offset;
+#endif
+
+ /**
+ * Pointer to the dynamically generated function
+ * that will execute the FFT
+ */
+ transform_func_t transform;
+
+ /**
+ * Pointer to the base memory address of
+ * of the transform function
+ */
+ void *transform_base;
+
+ /**
+ * Size of the memory block containing the
+ * generated code
+ */
+ size_t transform_size;
+
+ /**
+ * Points to the constant variables used by
+ * the Assembly Code
+ */
+ void *constants;
+
+ // multi-dimensional stuff:
+ struct _ffts_plan_t **plans;
+ int rank;
+ size_t *Ns, *Ms;
+ void *buf;
+
+ void *transpose_buf;
+
+ /**
+ * Pointer to the destroy function
+ * to clean up the plan after use
+ * (differs for real and multi-dimensional transforms)
+ */
+ void (*destroy)(struct _ffts_plan_t *);
+
+ /**
+ * Coefficients for the real-valued transforms
+ */
+ float *A, *B;
+
+ size_t i2;
+};
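
/*
 * Because the generated assembly addresses plan members by hard-coded
 * offsets, a build-time guard can catch accidental reordering. This is a
 * sketch only (not part of the library); real guards would assert the
 * exact offsets the codegen assumes, which are not visible in this diff.
 * It assumes ffts_internal.h has been included for the struct definition.
 */
#include <stddef.h>

#define SKETCH_STATIC_ASSERT(cond, name) \
    typedef char sketch_assert_##name[(cond) ? 1 : -1]

/* e.g. guard the first member the generated code touches: */
SKETCH_STATIC_ASSERT(offsetof(struct _ffts_plan_t, offsets) == 0,
                     offsets_is_first);
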
+
+static FFTS_INLINE void *ffts_aligned_malloc(size_t size)
+{
+#if defined(_WIN32)
+ return _aligned_malloc(size, 32);
+#else
+ return valloc(size);
+#endif
+}
+
+static FFTS_INLINE void ffts_aligned_free(void *p)
+{
+#if defined(_WIN32)
+ _aligned_free(p);
+#else
+ free(p);
+#endif
+}
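
/*
 * A quick sketch of the allocator pair above: on non-Windows builds
 * valloc() returns page-aligned memory, which trivially satisfies the
 * 32-byte alignment the SIMD loads/stores in this library rely on.
 * sketch_alloc_complex is an illustrative name and assumes
 * ffts_internal.h has been included.
 */
#include <assert.h>
#include <stdint.h>

static float *sketch_alloc_complex(size_t n)
{
    float *buf = (float*) ffts_aligned_malloc(2 * n * sizeof(float));
    assert(buf == NULL || ((uintptr_t) buf & 31) == 0);
    return buf; /* release with ffts_aligned_free(buf) */
}
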
+
+#if GCC_VERSION_AT_LEAST(3,3)
+#define ffts_ctzl __builtin_ctzl
+#elif defined(_MSC_VER)
+#include <intrin.h>
+#ifdef _M_X64
+#pragma intrinsic(_BitScanForward64)
+static __inline unsigned long ffts_ctzl(size_t N)
+{
+ unsigned long count;
+ _BitScanForward64((unsigned long*) &count, N);
+ return count;
+}
+#else
+#pragma intrinsic(_BitScanForward)
+static __inline unsigned long ffts_ctzl(size_t N)
+{
+ unsigned long count;
+ _BitScanForward((unsigned long*) &count, N);
+ return count;
+}
+#endif /* _M_X64 */
+#endif /* _MSC_VER */
+
+#endif /* FFTS_INTERNAL_H */
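
/*
 * ffts_ctzl() counts trailing zero bits, so for the power-of-two sizes
 * used here it doubles as an integer log2 -- which is how the transform
 * size heuristic in ffts_init_1d (16384 + 2*N/8 * ffts_ctzl(N)) scales
 * the code buffer with log2(N). A tiny sketch, assuming a toolchain that
 * provides one of the two definitions above (GCC >= 3.3 or MSVC):
 */
#include <assert.h>

static void sketch_ctzl_demo(void)
{
    assert(ffts_ctzl(1) == 0);      /* 2^0 */
    assert(ffts_ctzl(8) == 3);      /* 2^3 */
    assert(ffts_ctzl(4096) == 12);  /* 2^12 */
}
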
diff --git a/lib/ffts/src/ffts_nd.c b/lib/ffts/src/ffts_nd.c
index ae9b148..64220f1 100644
--- a/lib/ffts/src/ffts_nd.c
+++ b/lib/ffts/src/ffts_nd.c
@@ -1,282 +1,193 @@
/*
-
- This file is part of FFTS -- The Fastest Fourier Transform in the South
-
- Copyright (c) 2012, Anthony M. Blake <amb@anthonix.com>
- Copyright (c) 2012, The University of Waikato
-
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions are met:
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in the
- documentation and/or other materials provided with the distribution.
- * Neither the name of the organization nor the
- names of its contributors may be used to endorse or promote products
- derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
- ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- DISCLAIMED. IN NO EVENT SHALL ANTHONY M. BLAKE BE LIABLE FOR ANY
- DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
-
-#include "ffts_nd.h"
+This file is part of FFTS -- The Fastest Fourier Transform in the South
+
+Copyright (c) 2016, Jukka Ojanen <jukka.ojanen@kolumbus.fi>
+Copyright (c) 2012, Anthony M. Blake <amb@anthonix.com>
+Copyright (c) 2012, The University of Waikato
+
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+* Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+* Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in the
+documentation and/or other materials provided with the distribution.
+* Neither the name of the organization nor the
+names of its contributors may be used to endorse or promote products
+derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL ANTHONY M. BLAKE BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#ifdef HAVE_NEON
-#include "neon.h"
-#endif
-
-void ffts_free_nd(ffts_plan_t *p) {
-
- int i;
- for(i=0;i<p->rank;i++) {
-
- ffts_plan_t *x = p->plans[i];
- int k;
- for(k=0;k<i;k++) {
- if(p->Ms[i] == p->Ms[k]) x = NULL;
- }
-
- if(x) ffts_free(x);
- }
-
- free(p->Ns);
- free(p->Ms);
- free(p->plans);
- free(p->buf);
- free(p->transpose_buf);
- free(p);
-}
-#define TSIZE 8
-#include <string.h>
-void ffts_transpose(uint64_t *in, uint64_t *out, int w, int h, uint64_t *buf) {
-
-#ifdef HAVE_NEON
- size_t i,j,k;
- int linebytes = w*8;
-
- for(j=0;j<h;j+=8) {
- for(i=0;i<w;i+=8) {
- neon_transpose_to_buf(in + j*w + i, buf, w);
-
- uint64_t *p = out + i*h + j;
- uint64_t *pbuf = buf;
- uint64_t *ptemp;
-
- __asm__ __volatile__(
- "mov %[ptemp], %[p]\n\t"
- "add %[p], %[p], %[w], lsl #3\n\t"
- "vld1.32 {q8,q9}, [%[pbuf], :128]!\n\t"
- "vld1.32 {q10,q11}, [%[pbuf], :128]!\n\t"
- "vld1.32 {q12,q13}, [%[pbuf], :128]!\n\t"
- "vld1.32 {q14,q15}, [%[pbuf], :128]!\n\t"
- "vst1.32 {q8,q9}, [%[ptemp], :128]!\n\t"
- "vst1.32 {q10,q11}, [%[ptemp], :128]!\n\t"
- "mov %[ptemp], %[p]\n\t"
- "add %[p], %[p], %[w], lsl #3\n\t"
- "vst1.32 {q12,q13}, [%[ptemp], :128]!\n\t"
- "vst1.32 {q14,q15}, [%[ptemp], :128]!\n\t"
- "mov %[ptemp], %[p]\n\t"
- "add %[p], %[p], %[w], lsl #3\n\t"
- "vld1.32 {q8,q9}, [%[pbuf], :128]!\n\t"
- "vld1.32 {q10,q11}, [%[pbuf], :128]!\n\t"
- "vld1.32 {q12,q13}, [%[pbuf], :128]!\n\t"
- "vld1.32 {q14,q15}, [%[pbuf], :128]!\n\t"
- "vst1.32 {q8,q9}, [%[ptemp], :128]!\n\t"
- "vst1.32 {q10,q11}, [%[ptemp], :128]!\n\t"
- "mov %[ptemp], %[p]\n\t"
- "add %[p], %[p], %[w], lsl #3\n\t"
- "vst1.32 {q12,q13}, [%[ptemp], :128]!\n\t"
- "vst1.32 {q14,q15}, [%[ptemp], :128]!\n\t"
- "mov %[ptemp], %[p]\n\t"
- "add %[p], %[p], %[w], lsl #3\n\t"
- "vld1.32 {q8,q9}, [%[pbuf], :128]!\n\t"
- "vld1.32 {q10,q11}, [%[pbuf], :128]!\n\t"
- "vld1.32 {q12,q13}, [%[pbuf], :128]!\n\t"
- "vld1.32 {q14,q15}, [%[pbuf], :128]!\n\t"
- "vst1.32 {q8,q9}, [%[ptemp], :128]!\n\t"
- "vst1.32 {q10,q11}, [%[ptemp], :128]!\n\t"
- "mov %[ptemp], %[p]\n\t"
- "add %[p], %[p], %[w], lsl #3\n\t"
- "vst1.32 {q12,q13}, [%[ptemp], :128]!\n\t"
- "vst1.32 {q14,q15}, [%[ptemp], :128]!\n\t"
- "mov %[ptemp], %[p]\n\t"
- "add %[p], %[p], %[w], lsl #3\n\t"
- "vld1.32 {q8,q9}, [%[pbuf], :128]!\n\t"
- "vld1.32 {q10,q11}, [%[pbuf], :128]!\n\t"
- "vld1.32 {q12,q13}, [%[pbuf], :128]!\n\t"
- "vld1.32 {q14,q15}, [%[pbuf], :128]!\n\t"
- "vst1.32 {q8,q9}, [%[ptemp], :128]!\n\t"
- "vst1.32 {q10,q11}, [%[ptemp], :128]!\n\t"
- "mov %[ptemp], %[p]\n\t"
- "vst1.32 {q12,q13}, [%[ptemp], :128]!\n\t"
- "vst1.32 {q14,q15}, [%[ptemp], :128]!\n\t"
-
- : [p] "+r" (p), [pbuf] "+r" (pbuf), [ptemp] "+r" (ptemp)
- : [w] "r" (w)
- : "memory", "q8", "q9", "q10", "q11"
- );
-// out[i*h + j] = in[j*w + i];
- }
- }
-#else
-#ifdef HAVE_SSE
- uint64_t tmp[TSIZE*TSIZE] __attribute__((aligned(64)));
- int tx, ty;
- int x, y;
- int tw = w / TSIZE;
- int th = h / TSIZE;
- for (ty=0;ty<th;ty++) {
- for (tx=0;tx<tw;tx++) {
- uint64_t *ip0 = in + w*TSIZE*ty + tx * TSIZE;
- uint64_t *op0 = tmp;//out + h*TSIZE*tx + ty*TSIZE;
-
- // Copy/transpose to tmp
- for (y=0;y<TSIZE;y+=2) {
- //for (x=0;x<TSIZE;x+=2) {
- //op[x*TSIZE] = ip[x];
- __m128d q0 = _mm_load_pd((double *)(ip0 + 0*w));
- __m128d q1 = _mm_load_pd((double *)(ip0 + 1*w));
- __m128d q2 = _mm_load_pd((double *)(ip0 + 2*w));
- __m128d q3 = _mm_load_pd((double *)(ip0 + 3*w));
- __m128d q4 = _mm_load_pd((double *)(ip0 + 4*w));
- __m128d q5 = _mm_load_pd((double *)(ip0 + 5*w));
- __m128d q6 = _mm_load_pd((double *)(ip0 + 6*w));
- __m128d q7 = _mm_load_pd((double *)(ip0 + 7*w));
- ip0 += 2;
-
- __m128d t0 = _mm_shuffle_pd(q0, q1, _MM_SHUFFLE2(0, 0));
- __m128d t1 = _mm_shuffle_pd(q0, q1, _MM_SHUFFLE2(1, 1));
- __m128d t2 = _mm_shuffle_pd(q2, q3, _MM_SHUFFLE2(0, 0));
- __m128d t3 = _mm_shuffle_pd(q2, q3, _MM_SHUFFLE2(1, 1));
- __m128d t4 = _mm_shuffle_pd(q4, q5, _MM_SHUFFLE2(0, 0));
- __m128d t5 = _mm_shuffle_pd(q4, q5, _MM_SHUFFLE2(1, 1));
- __m128d t6 = _mm_shuffle_pd(q6, q7, _MM_SHUFFLE2(0, 0));
- __m128d t7 = _mm_shuffle_pd(q6, q7, _MM_SHUFFLE2(1, 1));
- //_mm_store_pd((double *)(op0 + y*h + x), t0);
- //_mm_store_pd((double *)(op0 + y*h + x + h), t1);
- _mm_store_pd((double *)(op0 + 0), t0);
- _mm_store_pd((double *)(op0 + 0 + TSIZE), t1);
- _mm_store_pd((double *)(op0 + 2 ), t2);
- _mm_store_pd((double *)(op0 + 2 + TSIZE), t3);
- _mm_store_pd((double *)(op0 + 4 ), t4);
- _mm_store_pd((double *)(op0 + 4 + TSIZE), t5);
- _mm_store_pd((double *)(op0 + 6 ), t6);
- _mm_store_pd((double *)(op0 + 6 + TSIZE), t7);
- //}
- op0 += 2*TSIZE;
- }
-
- op0 = out + h*tx*TSIZE + ty*TSIZE;
- ip0 = tmp;
- for (y=0;y<TSIZE;y+=1) {
- // memcpy(op0, ip0, TSIZE * sizeof(*ip0));
-
- __m128d q0 = _mm_load_pd((double *)(ip0 + 0));
- __m128d q1 = _mm_load_pd((double *)(ip0 + 2));
- __m128d q2 = _mm_load_pd((double *)(ip0 + 4));
- __m128d q3 = _mm_load_pd((double *)(ip0 + 6));
- _mm_store_pd((double *)(op0 + 0), q0);
- _mm_store_pd((double *)(op0 + 2), q1);
- _mm_store_pd((double *)(op0 + 4), q2);
- _mm_store_pd((double *)(op0 + 6), q3);
-
- op0 += h;
- ip0 += TSIZE;
- }
-
- }
- }
-/*
- size_t i,j;
- for(i=0;i<w;i+=2) {
- for(j=0;j<h;j+=2) {
-// out[i*h + j] = in[j*w + i];
- __m128d q0 = _mm_load_pd((double *)(in + j*w + i));
- __m128d q1 = _mm_load_pd((double *)(in + j*w + i + w));
- __m128d t0 = _mm_shuffle_pd(q0, q1, _MM_SHUFFLE2(0, 0));
- __m128d t1 = _mm_shuffle_pd(q0, q1, _MM_SHUFFLE2(1, 1));
- _mm_store_pd((double *)(out + i*h + j), t0);
- _mm_store_pd((double *)(out + i*h + j + h), t1);
- }
- }
*/
-#endif
-#endif
-
-}
-
-void ffts_execute_nd(ffts_plan_t *p, const void * in, void * out) {
-
- uint64_t *din = (uint64_t *)in;
- uint64_t *buf = p->buf;
- uint64_t *dout = (uint64_t *)out;
- size_t i,j;
- for(i=0;i<p->Ns[0];i++) {
- p->plans[0]->transform(p->plans[0], din + (i * p->Ms[0]), buf + (i * p->Ms[0]));
- }
- ffts_transpose(buf, dout, p->Ms[0], p->Ns[0], p->transpose_buf);
-
- for(i=1;i<p->rank;i++) {
- for(j=0;j<p->Ns[i];j++) {
- p->plans[i]->transform(p->plans[i], dout + (j * p->Ms[i]), buf + (j * p->Ms[i]));
- }
- ffts_transpose(buf, dout, p->Ms[i], p->Ns[i], p->transpose_buf);
- }
+#include "ffts_nd.h"
+#include "ffts_internal.h"
+#include "ffts_transpose.h"
+
+static void
+ffts_free_nd(ffts_plan_t *p)
+{
+ if (p->plans) {
+ int i, j;
+
+ for (i = 0; i < p->rank; i++) {
+ ffts_plan_t *plan = p->plans[i];
+
+ if (plan) {
+ for (j = 0; j < i; j++) {
+ if (p->Ns[i] == p->Ns[j]) {
+ plan = NULL;
+ break;
+ }
+ }
+
+ if (plan) {
+ ffts_free(plan);
+ }
+ }
+ }
+
+ free(p->plans);
+ }
+
+ if (p->Ns) {
+ free(p->Ns);
+ }
+
+ if (p->Ms) {
+ free(p->Ms);
+ }
+
+ if (p->buf) {
+ ffts_aligned_free(p->buf);
+ }
+
+ free(p);
}
-ffts_plan_t *ffts_init_nd(int rank, size_t *Ns, int sign) {
- size_t vol = 1;
+static void
+ffts_execute_nd(ffts_plan_t *p, const void *in, void *out)
+{
+ uint64_t *din = (uint64_t*) in;
+ uint64_t *buf = p->buf;
+ uint64_t *dout = (uint64_t*) out;
- ffts_plan_t *p = malloc(sizeof(ffts_plan_t));
+ ffts_plan_t *plan;
+ int i;
+ size_t j;
- p->transform = &ffts_execute_nd;
- p->destroy = &ffts_free_nd;
+ plan = p->plans[0];
+ for (j = 0; j < p->Ms[0]; j++) {
+ plan->transform(plan, din + (j * p->Ns[0]), buf + (j * p->Ns[0]));
+ }
- p->rank = rank;
- p->Ns = malloc(sizeof(size_t) * rank);
- p->Ms = malloc(sizeof(size_t) * rank);
- p->plans = malloc(sizeof(ffts_plan_t **) * rank);
- int i;
- for(i=0;i<rank;i++) {
- p->Ns[i] = Ns[i];
- vol *= Ns[i];
- }
- p->buf = valloc(sizeof(float) * 2 * vol);
+ ffts_transpose(buf, dout, p->Ns[0], p->Ms[0]);
- for(i=0;i<rank;i++) {
- p->Ms[i] = vol / p->Ns[i];
+ for (i = 1; i < p->rank; i++) {
+ plan = p->plans[i];
- p->plans[i] = NULL;
- int k;
- for(k=0;k<i;k++) {
- if(p->Ms[k] == p->Ms[i])
- p->plans[i] = p->plans[k];
- }
+ for (j = 0; j < p->Ms[i]; j++) {
+ plan->transform(plan, dout + (j * p->Ns[i]), buf + (j * p->Ns[i]));
+ }
- if(!p->plans[i]) p->plans[i] = ffts_init_1d(p->Ms[i], sign);
- }
+ ffts_transpose(buf, dout, p->Ns[i], p->Ms[i]);
+ }
+}
- p->transpose_buf = valloc(sizeof(float) * 2 * 8 * 8);
- return p;
+FFTS_API ffts_plan_t*
+ffts_init_nd(int rank, size_t *Ns, int sign)
+{
+ ffts_plan_t *p;
+ size_t vol = 1;
+ int i, j;
+
+ if (!Ns) {
+ return NULL;
+ }
+
+ if (rank == 1) {
+ return ffts_init_1d(Ns[0], sign);
+ }
+
+ p = calloc(1, sizeof(*p));
+ if (!p) {
+ return NULL;
+ }
+
+ p->transform = &ffts_execute_nd;
+ p->destroy = &ffts_free_nd;
+ p->rank = rank;
+
+ p->Ms = malloc(rank * sizeof(*p->Ms));
+ if (!p->Ms) {
+ goto cleanup;
+ }
+
+ p->Ns = malloc(rank * sizeof(*p->Ns));
+ if (!p->Ns) {
+ goto cleanup;
+ }
+
+ /* reverse the order of the dimensions */
+ for (i = 0; i < rank; i++) {
+ size_t N = Ns[rank - i - 1];
+ p->Ns[i] = N;
+ vol *= N;
+ }
+
+ p->buf = ffts_aligned_malloc(2 * vol * sizeof(float));
+ if (!p->buf) {
+ goto cleanup;
+ }
+
+ p->plans = calloc(rank, sizeof(*p->plans));
+ if (!p->plans) {
+ goto cleanup;
+ }
+
+ for (i = 0; i < rank; i++) {
+ p->Ms[i] = vol / p->Ns[i];
+
+ for (j = 0; j < i; j++) {
+ if (p->Ns[i] == p->Ns[j]) {
+ p->plans[i] = p->plans[j];
+ break;
+ }
+ }
+
+ if (!p->plans[i]) {
+ p->plans[i] = ffts_init_1d(p->Ns[i], sign);
+ if (!p->plans[i]) {
+ goto cleanup;
+ }
+ }
+ }
+
+ return p;
+
+cleanup:
+ ffts_free_nd(p);
+ return NULL;
}
+FFTS_API ffts_plan_t*
+ffts_init_2d(size_t N1, size_t N2, int sign)
+{
+ size_t Ns[2];
-ffts_plan_t *ffts_init_2d(size_t N1, size_t N2, int sign) {
- size_t Ns[2];
- Ns[0] = N1;
- Ns[1] = N2;
- return ffts_init_nd(2, Ns, sign);
+ Ns[0] = N1; /* x */
+ Ns[1] = N2; /* y */
+ return ffts_init_nd(2, Ns, sign);
}
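
/*
 * A minimal usage sketch for ffts_init_2d() above, assuming an N1-by-N2
 * grid of interleaved complex floats; buffer alignment requirements
 * follow the 1-D case. sketch_forward_2d is an illustrative name only.
 */
#include "ffts.h"

int sketch_forward_2d(const float *in, float *out)
{
    ffts_plan_t *p = ffts_init_2d(32, 16, -1); /* 32 x 16, forward */
    if (!p) {
        return -1;
    }
    ffts_execute(p, in, out);
    ffts_free(p);
    return 0;
}
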
diff --git a/lib/ffts/src/ffts_nd.h b/lib/ffts/src/ffts_nd.h
index 8f0c855..1998cc6 100644
--- a/lib/ffts/src/ffts_nd.h
+++ b/lib/ffts/src/ffts_nd.h
@@ -1,58 +1,50 @@
/*
-
- This file is part of FFTS -- The Fastest Fourier Transform in the South
-
- Copyright (c) 2012, Anthony M. Blake <amb@anthonix.com>
- Copyright (c) 2012, The University of Waikato
-
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions are met:
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in the
- documentation and/or other materials provided with the distribution.
- * Neither the name of the organization nor the
- names of its contributors may be used to endorse or promote products
- derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
- ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- DISCLAIMED. IN NO EVENT SHALL ANTHONY M. BLAKE BE LIABLE FOR ANY
- DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
-
-#ifndef __FFTS_ND_H__
-#define __FFTS_ND_H__
+This file is part of FFTS -- The Fastest Fourier Transform in the South
+
+Copyright (c) 2012, Anthony M. Blake <amb@anthonix.com>
+Copyright (c) 2012, The University of Waikato
+
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+* Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+* Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in the
+documentation and/or other materials provided with the distribution.
+* Neither the name of the organization nor the
+names of its contributors may be used to endorse or promote products
+derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL ANTHONY M. BLAKE BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#include <stdint.h>
-#include <stddef.h>
-#include <stdio.h>
+*/
-#include "ffts.h"
+#ifndef FFTS_ND_H
+#define FFTS_ND_H
-#ifdef HAVE_NEON
- #include <arm_neon.h>
-#endif
-#ifdef HAVE_SSE
- #include <xmmintrin.h>
+#if defined (_MSC_VER) && (_MSC_VER >= 1020)
+#pragma once
#endif
-void ffts_free_nd(ffts_plan_t *p);
-void ffts_transpose(uint64_t *in, uint64_t *out, int w, int h, uint64_t *buf);
+#include "ffts.h"
+#include <stddef.h>
-void ffts_execute_nd(ffts_plan_t *p, const void * in, void * out);
-ffts_plan_t *ffts_init_nd(int rank, size_t *Ns, int sign);
-ffts_plan_t *ffts_init_2d(size_t N1, size_t N2, int sign);
+ffts_plan_t*
+ffts_init_nd(int rank, size_t *Ns, int sign);
-#endif
+ffts_plan_t*
+ffts_init_2d(size_t N1, size_t N2, int sign);
+#endif /* FFTS_ND_H */
diff --git a/lib/ffts/src/ffts_real.c b/lib/ffts/src/ffts_real.c
index bdb6eac..0f87a12 100644
--- a/lib/ffts/src/ffts_real.c
+++ b/lib/ffts/src/ffts_real.c
@@ -1,226 +1,654 @@
/*
-
- This file is part of FFTS -- The Fastest Fourier Transform in the South
-
- Copyright (c) 2012, Anthony M. Blake <amb@anthonix.com>
- Copyright (c) 2012, The University of Waikato
-
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions are met:
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in the
- documentation and/or other materials provided with the distribution.
- * Neither the name of the organization nor the
- names of its contributors may be used to endorse or promote products
- derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
- ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- DISCLAIMED. IN NO EVENT SHALL ANTHONY M. BLAKE BE LIABLE FOR ANY
- DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+This file is part of FFTS -- The Fastest Fourier Transform in the South
+
+Copyright (c) 2012, Anthony M. Blake <amb@anthonix.com>
+Copyright (c) 2012, The University of Waikato
+Copyright (c) 2015, Jukka Ojanen <jukka.ojanen@kolumbus.fi>
+
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+* Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+* Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in the
+documentation and/or other materials provided with the distribution.
+* Neither the name of the organization nor the
+names of its contributors may be used to endorse or promote products
+derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL ANTHONY M. BLAKE BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "ffts_real.h"
+#include "ffts_internal.h"
+#include "ffts_trig.h"
+
+#ifdef HAVE_NEON
+#include <arm_neon.h>
+#elif HAVE_SSE
+#include <xmmintrin.h>
+
+/* check if have SSE3 intrinsics */
+#ifdef HAVE_PMMINTRIN_H
+#include <pmmintrin.h>
+#elif HAVE_INTRIN_H
+#include <intrin.h>
+#else
+/* avoid negative zero constants, as some configurations have problems with them */
+static const FFTS_ALIGN(16) unsigned int sign_mask_even[4] = {
+ 0x80000000, 0, 0x80000000, 0
+};
+static const FFTS_ALIGN(16) unsigned int sign_mask_odd[4] = {
+ 0, 0x80000000, 0, 0x80000000
+};
+#endif
+#endif
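
/*
 * The sign_mask_even table above exists so the plain-SSE path can fake
 * SSE3's _mm_addsub_ps(): XOR-ing a float lane with 0x80000000 flips only
 * its sign bit. A small equivalence sketch (illustrative only; it compiles
 * only in the fallback configuration that defines the masks):
 */
#if defined(HAVE_SSE) && !defined(HAVE_PMMINTRIN_H) && !defined(HAVE_INTRIN_H)
static __m128 sketch_addsub(__m128 a, __m128 b)
{
    /* negate lanes 0 and 2 of b, then add: (a0-b0, a1+b1, a2-b2, a3+b3) */
    __m128 mask = _mm_load_ps((const float*) sign_mask_even);
    return _mm_add_ps(a, _mm_xor_ps(b, mask));
}
#endif
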
+
+static void
+ffts_free_1d_real(ffts_plan_t *p)
+{
+ if (p->B) {
+ ffts_aligned_free(p->B);
+ }
+
+ if (p->A) {
+ ffts_aligned_free(p->A);
+ }
-void ffts_free_1d_real(ffts_plan_t *p) {
- ffts_free(p->plans[0]);
- free(p->A);
- free(p->B);
- free(p->plans);
- free(p->buf);
- free(p);
+ if (p->buf) {
+ ffts_aligned_free(p->buf);
+ }
+
+ if (p->plans[0]) {
+ ffts_free(p->plans[0]);
+ }
+
+ free(p);
}
-void ffts_execute_1d_real(ffts_plan_t *p, const void *vin, void *vout) {
- float *out = (float *)vout;
- float *buf = (float *)p->buf;
- float *A = p->A;
- float *B = p->B;
+static void
+ffts_execute_1d_real(ffts_plan_t *p, const void *input, void *output)
+{
+ float *const FFTS_RESTRICT out =
+ (float *const FFTS_RESTRICT) FFTS_ASSUME_ALIGNED_16(output);
+ float *const FFTS_RESTRICT buf =
+ (float *const FFTS_RESTRICT) FFTS_ASSUME_ALIGNED_32(p->buf);
+ const float *const FFTS_RESTRICT A =
+ (const float *const FFTS_RESTRICT) FFTS_ASSUME_ALIGNED_32(p->A);
+ const float *const FFTS_RESTRICT B =
+ (const float *const FFTS_RESTRICT) FFTS_ASSUME_ALIGNED_32(p->B);
+ const int N = (const int) p->N;
+ int i;
+
+#ifdef __ARM_NEON__
+ float *p_buf0 = buf;
+ float *p_buf1 = buf + N - 2;
+ float *p_out = out;
+#endif
- p->plans[0]->transform(p->plans[0], vin, buf);
+ /* N is always positive here; let the optimizer know */
+ FFTS_ASSUME(N/2 > 0);
- size_t N = p->N;
- buf[N] = buf[0];
- buf[N+1] = buf[1];
+ p->plans[0]->transform(p->plans[0], input, buf);
- float *p_buf0 = buf;
- float *p_buf1 = buf + N - 2;
- float *p_out = out;
+#ifndef HAVE_SSE
+ buf[N + 0] = buf[0];
+ buf[N + 1] = buf[1];
+#endif
- size_t i;
#ifdef __ARM_NEON__
- for(i=0;i<N/2;i+=2) {
- __asm__ __volatile__ ("vld1.32 {q8}, [%[pa], :128]!\n\t"
- "vld1.32 {q9}, [%[pb], :128]!\n\t"
- "vld1.32 {q10}, [%[buf0], :128]!\n\t"
- "vld1.32 {q11}, [%[buf1], :64]\n\t"
- "sub %[buf1], %[buf1], #16\n\t"
-
- "vdup.32 d26, d16[1]\n\t"
- "vdup.32 d27, d17[1]\n\t"
- "vdup.32 d24, d16[0]\n\t"
- "vdup.32 d25, d17[0]\n\t"
-
- "vdup.32 d30, d23[1]\n\t"
- "vdup.32 d31, d22[1]\n\t"
- "vdup.32 d28, d23[0]\n\t"
- "vdup.32 d29, d22[0]\n\t"
-
- "vmul.f32 q13, q13, q10\n\t"
- "vmul.f32 q15, q15, q9\n\t"
- "vmul.f32 q12, q12, q10\n\t"
- "vmul.f32 q14, q14, q9\n\t"
- "vrev64.f32 q13, q13\n\t"
- "vrev64.f32 q15, q15\n\t"
-
- "vtrn.32 d26, d27\n\t"
- "vtrn.32 d30, d31\n\t"
- "vneg.f32 d26, d26\n\t"
- "vneg.f32 d31, d31\n\t"
- "vtrn.32 d26, d27\n\t"
- "vtrn.32 d30, d31\n\t"
-
- "vadd.f32 q12, q12, q14\n\t"
- "vadd.f32 q13, q13, q15\n\t"
- "vadd.f32 q12, q12, q13\n\t"
- "vst1.32 {q12}, [%[pout], :128]!\n\t"
- : [pa] "+r" (A), [pb] "+r" (B), [buf0] "+r" (p_buf0), [buf1] "+r" (p_buf1),
- [pout] "+r" (p_out)
- :
- : "memory", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
- );
+ for (i = 0; i < N; i += 4) {
+ __asm__ __volatile__ (
+ "vld1.32 {q8}, [%[pa]]!\n\t"
+ "vld1.32 {q9}, [%[pb]]!\n\t"
+ "vld1.32 {q10}, [%[buf0]]!\n\t"
+ "vld1.32 {q11}, [%[buf1]]\n\t"
+ "sub %[buf1], %[buf1], #16\n\t"
+
+ "vdup.32 d26, d16[1]\n\t"
+ "vdup.32 d27, d17[1]\n\t"
+ "vdup.32 d24, d16[0]\n\t"
+ "vdup.32 d25, d17[0]\n\t"
+
+ "vdup.32 d30, d23[1]\n\t"
+ "vdup.32 d31, d22[1]\n\t"
+ "vdup.32 d28, d23[0]\n\t"
+ "vdup.32 d29, d22[0]\n\t"
+
+ "vmul.f32 q13, q13, q10\n\t"
+ "vmul.f32 q15, q15, q9\n\t"
+ "vmul.f32 q12, q12, q10\n\t"
+ "vmul.f32 q14, q14, q9\n\t"
+ "vrev64.f32 q13, q13\n\t"
+ "vrev64.f32 q15, q15\n\t"
+
+ "vtrn.32 d26, d27\n\t"
+ "vtrn.32 d30, d31\n\t"
+ "vneg.f32 d26, d26\n\t"
+ "vneg.f32 d31, d31\n\t"
+ "vtrn.32 d26, d27\n\t"
+ "vtrn.32 d30, d31\n\t"
+
+ "vadd.f32 q12, q12, q14\n\t"
+ "vadd.f32 q13, q13, q15\n\t"
+ "vadd.f32 q12, q12, q13\n\t"
+ "vst1.32 {q12}, [%[pout]]!\n\t"
+ : [buf0] "+r" (p_buf0), [buf1] "+r" (p_buf1), [pout] "+r" (p_out)
+ : [pa] "r" (A), [pb] "r" (B)
+ : "memory", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
+ );
+ }
+#elif HAVE_SSE3
+ if (FFTS_UNLIKELY(N <= 8)) {
+ __m128 t0 = _mm_load_ps(buf);
+ __m128 t1 = _mm_load_ps(buf + N - 4);
+ __m128 t2 = _mm_load_ps(A);
+ __m128 t3 = _mm_load_ps(B);
+
+ _mm_store_ps(out, _mm_add_ps(_mm_addsub_ps(
+ _mm_mul_ps(t0, _mm_moveldup_ps(t2)),
+ _mm_mul_ps(_mm_shuffle_ps(t0, t0, _MM_SHUFFLE(2,3,0,1)),
+ _mm_movehdup_ps(t2))), _mm_addsub_ps(
+ _mm_mul_ps(_mm_shuffle_ps(t0, t1, _MM_SHUFFLE(3,3,1,1)),
+ _mm_shuffle_ps(t3, t3, _MM_SHUFFLE(2,3,0,1))),
+ _mm_mul_ps(_mm_shuffle_ps(t0, t1, _MM_SHUFFLE(2,2,0,0)), t3))));
+
+ if (N == 8) {
+ t2 = _mm_load_ps(A + 4);
+ t3 = _mm_load_ps(B + 4);
+
+ _mm_store_ps(out + 4, _mm_add_ps(_mm_addsub_ps(
+ _mm_mul_ps(t1, _mm_moveldup_ps(t2)),
+ _mm_mul_ps(_mm_shuffle_ps(t1, t1, _MM_SHUFFLE(2,3,0,1)),
+ _mm_movehdup_ps(t2))), _mm_addsub_ps(
+ _mm_mul_ps(_mm_shuffle_ps(t1, t0, _MM_SHUFFLE(3,3,1,1)),
+ _mm_shuffle_ps(t3, t3, _MM_SHUFFLE(2,3,0,1))),
+ _mm_mul_ps(_mm_shuffle_ps(t1, t0, _MM_SHUFFLE(2,2,0,0)), t3))));
+ }
+ } else {
+ __m128 t0 = _mm_load_ps(buf);
+
+ for (i = 0; i < N; i += 16) {
+ __m128 t1 = _mm_load_ps(buf + i);
+ __m128 t2 = _mm_load_ps(buf + N - i - 4);
+ __m128 t3 = _mm_load_ps(A + i);
+ __m128 t4 = _mm_load_ps(B + i);
+
+ _mm_store_ps(out + i, _mm_add_ps(_mm_addsub_ps(
+ _mm_mul_ps(t1, _mm_moveldup_ps(t3)),
+ _mm_mul_ps(_mm_shuffle_ps(t1, t1, _MM_SHUFFLE(2,3,0,1)),
+ _mm_movehdup_ps(t3))), _mm_addsub_ps(
+ _mm_mul_ps(_mm_shuffle_ps(t0, t2, _MM_SHUFFLE(3,3,1,1)),
+ _mm_shuffle_ps(t4, t4, _MM_SHUFFLE(2,3,0,1))),
+ _mm_mul_ps(_mm_shuffle_ps(t0, t2, _MM_SHUFFLE(2,2,0,0)), t4))));
+
+ t0 = _mm_load_ps(buf + N - i - 8);
+ t1 = _mm_load_ps(buf + i + 4);
+ t3 = _mm_load_ps(A + i + 4);
+ t4 = _mm_load_ps(B + i + 4);
+
+ _mm_store_ps(out + i + 4, _mm_add_ps(_mm_addsub_ps(
+ _mm_mul_ps(t1, _mm_moveldup_ps(t3)),
+ _mm_mul_ps(_mm_shuffle_ps(t1, t1, _MM_SHUFFLE(2,3,0,1)),
+ _mm_movehdup_ps(t3))), _mm_addsub_ps(
+ _mm_mul_ps(_mm_shuffle_ps(t2, t0, _MM_SHUFFLE(3,3,1,1)),
+ _mm_shuffle_ps(t4, t4, _MM_SHUFFLE(2,3,0,1))),
+ _mm_mul_ps(_mm_shuffle_ps(t2, t0, _MM_SHUFFLE(2,2,0,0)), t4))));
+
+ t1 = _mm_load_ps(buf + i + 8);
+ t2 = _mm_load_ps(buf + N - i - 12);
+ t3 = _mm_load_ps(A + i + 8);
+ t4 = _mm_load_ps(B + i + 8);
+
+ _mm_store_ps(out + i + 8, _mm_add_ps(_mm_addsub_ps(
+ _mm_mul_ps(t1, _mm_moveldup_ps(t3)),
+ _mm_mul_ps(_mm_shuffle_ps(t1, t1, _MM_SHUFFLE(2,3,0,1)),
+ _mm_movehdup_ps(t3))), _mm_addsub_ps(
+ _mm_mul_ps(_mm_shuffle_ps(t0, t2, _MM_SHUFFLE(3,3,1,1)),
+ _mm_shuffle_ps(t4, t4, _MM_SHUFFLE(2,3,0,1))),
+ _mm_mul_ps(_mm_shuffle_ps(t0, t2, _MM_SHUFFLE(2,2,0,0)), t4))));
+
+ t0 = _mm_load_ps(buf + N - i - 16);
+ t1 = _mm_load_ps(buf + i + 12);
+ t3 = _mm_load_ps(A + i + 12);
+ t4 = _mm_load_ps(B + i + 12);
+
+ _mm_store_ps(out + i + 12, _mm_add_ps(_mm_addsub_ps(
+ _mm_mul_ps(t1, _mm_moveldup_ps(t3)),
+ _mm_mul_ps(_mm_shuffle_ps(t1, t1, _MM_SHUFFLE(2,3,0,1)),
+ _mm_movehdup_ps(t3))), _mm_addsub_ps(
+ _mm_mul_ps(_mm_shuffle_ps(t2, t0, _MM_SHUFFLE(3,3,1,1)),
+ _mm_shuffle_ps(t4, t4, _MM_SHUFFLE(2,3,0,1))),
+ _mm_mul_ps(_mm_shuffle_ps(t2, t0, _MM_SHUFFLE(2,2,0,0)), t4))));
+ }
+ }
+#elif HAVE_SSE
+ if (FFTS_UNLIKELY(N <= 8)) {
+ __m128 c0 = _mm_load_ps((const float*) sign_mask_even);
+ __m128 t0 = _mm_load_ps(buf);
+ __m128 t1 = _mm_load_ps(buf + N - 4);
+ __m128 t2 = _mm_load_ps(A);
+ __m128 t3 = _mm_load_ps(B);
+
+ _mm_store_ps(out, _mm_add_ps(_mm_add_ps(_mm_add_ps(
+ _mm_mul_ps(t0, _mm_shuffle_ps(t2, t2, _MM_SHUFFLE(2,2,0,0))),
+ _mm_mul_ps(_mm_shuffle_ps(t0, t0, _MM_SHUFFLE(2,3,0,1)),
+ _mm_xor_ps(_mm_shuffle_ps(t2, t2, _MM_SHUFFLE(3,3,1,1)), c0))),
+ _mm_mul_ps(_mm_shuffle_ps(t0, t1, _MM_SHUFFLE(2,2,0,0)), t3)),
+ _mm_mul_ps(_mm_shuffle_ps(t0, t1, _MM_SHUFFLE(3,3,1,1)),
+ _mm_shuffle_ps(_mm_xor_ps(t3, c0), _mm_xor_ps(t3, c0),
+ _MM_SHUFFLE(2,3,0,1)))));
+
+ if (N == 8) {
+ t2 = _mm_load_ps(A + 4);
+ t3 = _mm_load_ps(B + 4);
+
+ _mm_store_ps(out + 4, _mm_add_ps(_mm_add_ps(_mm_add_ps(
+ _mm_mul_ps(t1, _mm_shuffle_ps(t2, t2, _MM_SHUFFLE(2,2,0,0))),
+ _mm_mul_ps(_mm_shuffle_ps(t1, t1, _MM_SHUFFLE(2,3,0,1)),
+ _mm_xor_ps(_mm_shuffle_ps(t2, t2, _MM_SHUFFLE(3,3,1,1)), c0))),
+ _mm_mul_ps(_mm_shuffle_ps(t1, t0, _MM_SHUFFLE(2,2,0,0)), t3)),
+ _mm_mul_ps(_mm_shuffle_ps(t1, t0, _MM_SHUFFLE(3,3,1,1)),
+ _mm_shuffle_ps(_mm_xor_ps(t3, c0), _mm_xor_ps(t3, c0),
+ _MM_SHUFFLE(2,3,0,1)))));
+ }
+ } else {
+ __m128 c0 = _mm_load_ps((const float*) sign_mask_even);
+ __m128 t0 = _mm_load_ps(buf);
+
+ for (i = 0; i < N; i += 16) {
+ __m128 t1 = _mm_load_ps(buf + i);
+ __m128 t2 = _mm_load_ps(buf + N - i - 4);
+ __m128 t3 = _mm_load_ps(A + i);
+ __m128 t4 = _mm_load_ps(B + i);
+
+ _mm_store_ps(out + i, _mm_add_ps(_mm_add_ps(_mm_add_ps(
+ _mm_mul_ps(t1, _mm_shuffle_ps(t3, t3, _MM_SHUFFLE(2,2,0,0))),
+ _mm_mul_ps(_mm_shuffle_ps(t1, t1, _MM_SHUFFLE(2,3,0,1)),
+ _mm_xor_ps(_mm_shuffle_ps(t3, t3, _MM_SHUFFLE(3,3,1,1)), c0))),
+ _mm_mul_ps(_mm_shuffle_ps(t0, t2, _MM_SHUFFLE(2,2,0,0)), t4)),
+ _mm_mul_ps(_mm_shuffle_ps(t0, t2, _MM_SHUFFLE(3,3,1,1)),
+ _mm_shuffle_ps(_mm_xor_ps(t4, c0), _mm_xor_ps(t4, c0),
+ _MM_SHUFFLE(2,3,0,1)))));
+
+ t0 = _mm_load_ps(buf + N - i - 8);
+ t1 = _mm_load_ps(buf + i + 4);
+ t3 = _mm_load_ps(A + i + 4);
+ t4 = _mm_load_ps(B + i + 4);
+
+ _mm_store_ps(out + i + 4, _mm_add_ps(_mm_add_ps(_mm_add_ps(
+ _mm_mul_ps(t1, _mm_shuffle_ps(t3, t3, _MM_SHUFFLE(2,2,0,0))),
+ _mm_mul_ps(_mm_shuffle_ps(t1, t1, _MM_SHUFFLE(2,3,0,1)),
+ _mm_xor_ps(_mm_shuffle_ps(t3, t3, _MM_SHUFFLE(3,3,1,1)), c0))),
+ _mm_mul_ps(_mm_shuffle_ps(t2, t0, _MM_SHUFFLE(2,2,0,0)), t4)),
+ _mm_mul_ps(_mm_shuffle_ps(t2, t0, _MM_SHUFFLE(3,3,1,1)),
+ _mm_shuffle_ps(_mm_xor_ps(t4, c0), _mm_xor_ps(t4, c0),
+ _MM_SHUFFLE(2,3,0,1)))));
+
+ t1 = _mm_load_ps(buf + i + 8);
+ t2 = _mm_load_ps(buf + N - i - 12);
+ t3 = _mm_load_ps(A + i + 8);
+ t4 = _mm_load_ps(B + i + 8);
+
+ _mm_store_ps(out + i + 8, _mm_add_ps(_mm_add_ps(_mm_add_ps(
+ _mm_mul_ps(t1, _mm_shuffle_ps(t3, t3, _MM_SHUFFLE(2,2,0,0))),
+ _mm_mul_ps(_mm_shuffle_ps(t1, t1, _MM_SHUFFLE(2,3,0,1)),
+ _mm_xor_ps(_mm_shuffle_ps(t3, t3, _MM_SHUFFLE(3,3,1,1)), c0))),
+ _mm_mul_ps(_mm_shuffle_ps(t0, t2, _MM_SHUFFLE(2,2,0,0)), t4)),
+ _mm_mul_ps(_mm_shuffle_ps(t0, t2, _MM_SHUFFLE(3,3,1,1)),
+ _mm_shuffle_ps(_mm_xor_ps(t4, c0), _mm_xor_ps(t4, c0),
+ _MM_SHUFFLE(2,3,0,1)))));
+
+ t0 = _mm_load_ps(buf + N - i - 16);
+ t1 = _mm_load_ps(buf + i + 12);
+ t3 = _mm_load_ps(A + i + 12);
+ t4 = _mm_load_ps(B + i + 12);
+
+ _mm_store_ps(out + i + 12, _mm_add_ps(_mm_add_ps(_mm_add_ps(
+ _mm_mul_ps(t1, _mm_shuffle_ps(t3, t3, _MM_SHUFFLE(2,2,0,0))),
+ _mm_mul_ps(_mm_shuffle_ps(t1, t1, _MM_SHUFFLE(2,3,0,1)),
+ _mm_xor_ps(_mm_shuffle_ps(t3, t3, _MM_SHUFFLE(3,3,1,1)), c0))),
+ _mm_mul_ps(_mm_shuffle_ps(t2, t0, _MM_SHUFFLE(2,2,0,0)), t4)),
+ _mm_mul_ps(_mm_shuffle_ps(t2, t0, _MM_SHUFFLE(3,3,1,1)),
+ _mm_shuffle_ps(_mm_xor_ps(t4, c0), _mm_xor_ps(t4, c0),
+ _MM_SHUFFLE(2,3,0,1)))));
+ }
+ }
#else
- for(i=0;i<N/2;i++) {
- out[2*i] = buf[2*i]*A[2*i] - buf[2*i+1]*A[2*i+1] + buf[N-2*i]*B[2*i] + buf[N-2*i+1]*B[2*i+1];
- out[2*i+1] = buf[2*i+1]*A[2*i] + buf[2*i]*A[2*i+1] + buf[N-2*i]*B[2*i+1] - buf[N-2*i+1]*B[2*i];
-
-// out[2*N-2*i] = out[2*i];
-// out[2*N-2*i+1] = -out[2*i+1];
-
-#endif
- }
-
- out[N] = buf[0] - buf[1];
- out[N+1] = 0.0f;
-
+ for (i = 0; i < N/2; i++) {
+ out[2*i + 0] =
+ buf[ 2*i + 0] * A[2*i + 0] - buf[ 2*i + 1] * A[2*i + 1] +
+ buf[N - 2*i + 0] * B[2*i + 0] + buf[N - 2*i + 1] * B[2*i + 1];
+ out[2*i + 1] =
+ buf[ 2*i + 1] * A[2*i + 0] + buf[ 2*i + 0] * A[2*i + 1] +
+ buf[N - 2*i + 0] * B[2*i + 1] - buf[N - 2*i + 1] * B[2*i + 0];
+ }
+#endif
+
+ out[N + 0] = buf[0] - buf[1];
+ out[N + 1] = 0.0f;
}
-void ffts_execute_1d_real_inv(ffts_plan_t *p, const void *vin, void *vout) {
- float *out = (float *)vout;
- float *in = (float *)vin;
- float *buf = (float *)p->buf;
- float *A = p->A;
- float *B = p->B;
- size_t N = p->N;
-
- float *p_buf0 = in;
- float *p_buf1 = in + N - 2;
-
- float *p_out = buf;
-
- size_t i;
+static void
+ffts_execute_1d_real_inv(ffts_plan_t *p, const void *input, void *output)
+{
+ float *const FFTS_RESTRICT in =
+ (float *const FFTS_RESTRICT) FFTS_ASSUME_ALIGNED_16(input);
+ float *const FFTS_RESTRICT buf =
+ (float *const FFTS_RESTRICT) FFTS_ASSUME_ALIGNED_32(p->buf);
+ const float *const FFTS_RESTRICT A =
+ (const float *const FFTS_RESTRICT) FFTS_ASSUME_ALIGNED_32(p->A);
+ const float *const FFTS_RESTRICT B =
+ (const float *const FFTS_RESTRICT) FFTS_ASSUME_ALIGNED_32(p->B);
+ const int N = (const int) p->N;
+ int i;
+
+#ifdef __ARM_NEON__
+ float *p_buf0 = in;
+ float *p_buf1 = in + N - 2;
+ float *p_out = buf;
+#endif
+
+ /* N is always positive here; let the optimizer know */
+ FFTS_ASSUME(N/2 > 0);
+
#ifdef __ARM_NEON__
- for(i=0;i<N/2;i+=2) {
- __asm__ __volatile__ ("vld1.32 {q8}, [%[pa], :128]!\n\t"
- "vld1.32 {q9}, [%[pb], :128]!\n\t"
- "vld1.32 {q10}, [%[buf0], :128]!\n\t"
- "vld1.32 {q11}, [%[buf1], :64]\n\t"
- "sub %[buf1], %[buf1], #16\n\t"
-
- "vdup.32 d26, d16[1]\n\t"
- "vdup.32 d27, d17[1]\n\t"
- "vdup.32 d24, d16[0]\n\t"
- "vdup.32 d25, d17[0]\n\t"
-
- "vdup.32 d30, d23[1]\n\t"
- "vdup.32 d31, d22[1]\n\t"
- "vdup.32 d28, d23[0]\n\t"
- "vdup.32 d29, d22[0]\n\t"
-
- "vmul.f32 q13, q13, q10\n\t"
- "vmul.f32 q15, q15, q9\n\t"
- "vmul.f32 q12, q12, q10\n\t"
- "vmul.f32 q14, q14, q9\n\t"
- "vrev64.f32 q13, q13\n\t"
- "vrev64.f32 q15, q15\n\t"
-
- "vtrn.32 d26, d27\n\t"
- "vtrn.32 d28, d29\n\t"
- "vneg.f32 d27, d27\n\t"
- "vneg.f32 d29, d29\n\t"
- "vtrn.32 d26, d27\n\t"
- "vtrn.32 d28, d29\n\t"
-
- "vadd.f32 q12, q12, q14\n\t"
- "vsub.f32 q13, q13, q15\n\t"
- "vadd.f32 q12, q12, q13\n\t"
- "vst1.32 {q12}, [%[pout], :128]!\n\t"
- : [pa] "+r" (A), [pb] "+r" (B), [buf0] "+r" (p_buf0), [buf1] "+r" (p_buf1),
- [pout] "+r" (p_out)
- :
- : "memory", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
- );
+ for (i = 0; i < N/2; i += 2) {
+ __asm__ __volatile__ (
+ "vld1.32 {q8}, [%[pa]]!\n\t"
+ "vld1.32 {q9}, [%[pb]]!\n\t"
+ "vld1.32 {q10}, [%[buf0]]!\n\t"
+ "vld1.32 {q11}, [%[buf1]]\n\t"
+ "sub %[buf1], %[buf1], #16\n\t"
+
+ "vdup.32 d26, d16[1]\n\t"
+ "vdup.32 d27, d17[1]\n\t"
+ "vdup.32 d24, d16[0]\n\t"
+ "vdup.32 d25, d17[0]\n\t"
+
+ "vdup.32 d30, d23[1]\n\t"
+ "vdup.32 d31, d22[1]\n\t"
+ "vdup.32 d28, d23[0]\n\t"
+ "vdup.32 d29, d22[0]\n\t"
+
+ "vmul.f32 q13, q13, q10\n\t"
+ "vmul.f32 q15, q15, q9\n\t"
+ "vmul.f32 q12, q12, q10\n\t"
+ "vmul.f32 q14, q14, q9\n\t"
+ "vrev64.f32 q13, q13\n\t"
+ "vrev64.f32 q15, q15\n\t"
+
+ "vtrn.32 d26, d27\n\t"
+ "vtrn.32 d28, d29\n\t"
+ "vneg.f32 d27, d27\n\t"
+ "vneg.f32 d29, d29\n\t"
+ "vtrn.32 d26, d27\n\t"
+ "vtrn.32 d28, d29\n\t"
+
+ "vadd.f32 q12, q12, q14\n\t"
+ "vsub.f32 q13, q13, q15\n\t"
+ "vadd.f32 q12, q12, q13\n\t"
+ "vst1.32 {q12}, [%[pout]]!\n\t"
+ : [buf0] "+r" (p_buf0), [buf1] "+r" (p_buf1), [pout] "+r" (p_out)
+ : [pa] "r" (A), [pb] "r" (B)
+ : "memory", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
+ );
+ }
+#elif HAVE_SSE3
+ if (FFTS_UNLIKELY(N <= 8)) {
+ __m128 t0 = _mm_loadl_pi(_mm_setzero_ps(), (const __m64*) &in[N]);
+ __m128 t1 = _mm_load_ps(in);
+ __m128 t2 = _mm_load_ps(in + N - 4);
+ __m128 t3 = _mm_load_ps(A);
+ __m128 t4 = _mm_load_ps(B);
+
+ _mm_store_ps(buf, _mm_sub_ps(_mm_addsub_ps(
+ _mm_mul_ps(t1, _mm_moveldup_ps(t3)),
+ _mm_mul_ps(_mm_shuffle_ps(t1, t1, _MM_SHUFFLE(2,3,0,1)),
+ _mm_movehdup_ps(t3))), _mm_addsub_ps(
+ _mm_mul_ps(_mm_shuffle_ps(t0, t2, _MM_SHUFFLE(3,3,1,1)),
+ _mm_shuffle_ps(t4, t4, _MM_SHUFFLE(2,3,0,1))),
+ _mm_mul_ps(_mm_shuffle_ps(t0, t2, _MM_SHUFFLE(2,2,0,0)), t4))));
+
+ if (N == 8) {
+ t3 = _mm_load_ps(A + 4);
+ t4 = _mm_load_ps(B + 4);
+
+ _mm_store_ps(buf + 4, _mm_sub_ps(_mm_addsub_ps(
+ _mm_mul_ps(t2, _mm_moveldup_ps(t3)),
+ _mm_mul_ps(_mm_shuffle_ps(t2, t2, _MM_SHUFFLE(2,3,0,1)),
+ _mm_movehdup_ps(t3))), _mm_addsub_ps(
+ _mm_mul_ps(_mm_shuffle_ps(t2, t1, _MM_SHUFFLE(3,3,1,1)),
+ _mm_shuffle_ps(t4, t4, _MM_SHUFFLE(2,3,0,1))),
+ _mm_mul_ps(_mm_shuffle_ps(t2, t1, _MM_SHUFFLE(2,2,0,0)), t4))));
+ }
+ } else {
+ __m128 t0 = _mm_loadl_pi(_mm_setzero_ps(), (const __m64*) &in[N]);
+
+ for (i = 0; i < N; i += 16) {
+ __m128 t1 = _mm_load_ps(in + i);
+ __m128 t2 = _mm_load_ps(in + N - i - 4);
+ __m128 t3 = _mm_load_ps(A + i);
+ __m128 t4 = _mm_load_ps(B + i);
+
+ _mm_store_ps(buf + i, _mm_sub_ps(_mm_addsub_ps(
+ _mm_mul_ps(t1, _mm_moveldup_ps(t3)),
+ _mm_mul_ps(_mm_shuffle_ps(t1, t1, _MM_SHUFFLE(2,3,0,1)),
+ _mm_movehdup_ps(t3))), _mm_addsub_ps(
+ _mm_mul_ps(_mm_shuffle_ps(t0, t2, _MM_SHUFFLE(3,3,1,1)),
+ _mm_shuffle_ps(t4, t4, _MM_SHUFFLE(2,3,0,1))),
+ _mm_mul_ps(_mm_shuffle_ps(t0, t2, _MM_SHUFFLE(2,2,0,0)), t4))));
+
+ t0 = _mm_load_ps(in + N - i - 8);
+ t1 = _mm_load_ps(in + i + 4);
+ t3 = _mm_load_ps(A + i + 4);
+ t4 = _mm_load_ps(B + i + 4);
+
+ _mm_store_ps(buf + i + 4, _mm_sub_ps(_mm_addsub_ps(
+ _mm_mul_ps(t1, _mm_moveldup_ps(t3)),
+ _mm_mul_ps(_mm_shuffle_ps(t1, t1, _MM_SHUFFLE(2,3,0,1)),
+ _mm_movehdup_ps(t3))), _mm_addsub_ps(
+ _mm_mul_ps(_mm_shuffle_ps(t2, t0, _MM_SHUFFLE(3,3,1,1)),
+ _mm_shuffle_ps(t4, t4, _MM_SHUFFLE(2,3,0,1))),
+ _mm_mul_ps(_mm_shuffle_ps(t2, t0, _MM_SHUFFLE(2,2,0,0)), t4))));
+
+ t1 = _mm_load_ps(in + i + 8);
+ t2 = _mm_load_ps(in + N - i - 12);
+ t3 = _mm_load_ps(A + i + 8);
+ t4 = _mm_load_ps(B + i + 8);
+
+ _mm_store_ps(buf + i + 8, _mm_sub_ps(_mm_addsub_ps(
+ _mm_mul_ps(t1, _mm_moveldup_ps(t3)),
+ _mm_mul_ps(_mm_shuffle_ps(t1, t1, _MM_SHUFFLE(2,3,0,1)),
+ _mm_movehdup_ps(t3))), _mm_addsub_ps(
+ _mm_mul_ps(_mm_shuffle_ps(t0, t2, _MM_SHUFFLE(3,3,1,1)),
+ _mm_shuffle_ps(t4, t4, _MM_SHUFFLE(2,3,0,1))),
+ _mm_mul_ps(_mm_shuffle_ps(t0, t2, _MM_SHUFFLE(2,2,0,0)), t4))));
+
+ t0 = _mm_load_ps(in + N - i - 16);
+ t1 = _mm_load_ps(in + i + 12);
+ t3 = _mm_load_ps(A + i + 12);
+ t4 = _mm_load_ps(B + i + 12);
+
+ _mm_store_ps(buf + i + 12, _mm_sub_ps(_mm_addsub_ps(
+ _mm_mul_ps(t1, _mm_moveldup_ps(t3)),
+ _mm_mul_ps(_mm_shuffle_ps(t1, t1, _MM_SHUFFLE(2,3,0,1)),
+ _mm_movehdup_ps(t3))), _mm_addsub_ps(
+ _mm_mul_ps(_mm_shuffle_ps(t2, t0, _MM_SHUFFLE(3,3,1,1)),
+ _mm_shuffle_ps(t4, t4, _MM_SHUFFLE(2,3,0,1))),
+ _mm_mul_ps(_mm_shuffle_ps(t2, t0, _MM_SHUFFLE(2,2,0,0)), t4))));
+ }
+ }
+#elif HAVE_SSE
+ if (FFTS_UNLIKELY(N <= 8)) {
+ __m128 c0 = _mm_load_ps((const float*) sign_mask_odd);
+ __m128 t0 = _mm_loadl_pi(_mm_setzero_ps(), (const __m64*) &in[N]);
+ __m128 t1 = _mm_load_ps(in);
+ __m128 t2 = _mm_load_ps(in + N - 4);
+ __m128 t3 = _mm_load_ps(A);
+ __m128 t4 = _mm_load_ps(B);
+ _mm_store_ps(buf, _mm_add_ps(_mm_sub_ps(_mm_add_ps(
+ _mm_mul_ps(t1, _mm_shuffle_ps(t3, t3, _MM_SHUFFLE(2,2,0,0))),
+ _mm_mul_ps(_mm_shuffle_ps(t1, t1, _MM_SHUFFLE(2,3,0,1)),
+ _mm_xor_ps(_mm_shuffle_ps(t3, t3, _MM_SHUFFLE(3,3,1,1)), c0))),
+ _mm_mul_ps(_mm_shuffle_ps(t0, t2, _MM_SHUFFLE(3,3,1,1)),
+ _mm_shuffle_ps(t4, t4, _MM_SHUFFLE(2,3,0,1)))),
+ _mm_mul_ps(_mm_shuffle_ps(t0, t2, _MM_SHUFFLE(2,2,0,0)),
+ _mm_xor_ps(t4, c0))));
+ if (N == 8) {
+ t3 = _mm_load_ps(A + 4);
+ t4 = _mm_load_ps(B + 4);
+
+ _mm_store_ps(buf + 4, _mm_add_ps(_mm_sub_ps(_mm_add_ps(
+ _mm_mul_ps(t2, _mm_shuffle_ps(t3, t3, _MM_SHUFFLE(2,2,0,0))),
+ _mm_mul_ps(_mm_shuffle_ps(t2, t2, _MM_SHUFFLE(2,3,0,1)),
+ _mm_xor_ps(_mm_shuffle_ps(t3, t3, _MM_SHUFFLE(3,3,1,1)), c0))),
+ _mm_mul_ps(_mm_shuffle_ps(t2, t1, _MM_SHUFFLE(3,3,1,1)),
+ _mm_shuffle_ps(t4, t4, _MM_SHUFFLE(2,3,0,1)))),
+ _mm_mul_ps(_mm_shuffle_ps(t2, t1, _MM_SHUFFLE(2,2,0,0)),
+ _mm_xor_ps(t4, c0))));
+ }
+ } else {
+ __m128 c0 = _mm_load_ps((const float*) sign_mask_odd);
+ __m128 t0 = _mm_loadl_pi(_mm_setzero_ps(), (const __m64*) &in[N]);
+
+ for (i = 0; i < N; i += 16) {
+ __m128 t1 = _mm_load_ps(in + i);
+ __m128 t2 = _mm_load_ps(in + N - i - 4);
+ __m128 t3 = _mm_load_ps(A + i);
+ __m128 t4 = _mm_load_ps(B + i);
+
+ _mm_store_ps(buf + i, _mm_add_ps(_mm_sub_ps(_mm_add_ps(
+ _mm_mul_ps(t1, _mm_shuffle_ps(t3, t3, _MM_SHUFFLE(2,2,0,0))),
+ _mm_mul_ps(_mm_shuffle_ps(t1, t1, _MM_SHUFFLE(2,3,0,1)),
+ _mm_xor_ps(_mm_shuffle_ps(t3, t3, _MM_SHUFFLE(3,3,1,1)), c0))),
+ _mm_mul_ps(_mm_shuffle_ps(t0, t2, _MM_SHUFFLE(3,3,1,1)),
+ _mm_shuffle_ps(t4, t4, _MM_SHUFFLE(2,3,0,1)))),
+ _mm_mul_ps(_mm_shuffle_ps(t0, t2, _MM_SHUFFLE(2,2,0,0)),
+ _mm_xor_ps(t4, c0))));
+
+ t0 = _mm_load_ps(in + N - i - 8);
+ t1 = _mm_load_ps(in + i + 4);
+ t3 = _mm_load_ps(A + i + 4);
+ t4 = _mm_load_ps(B + i + 4);
+
+ _mm_store_ps(buf + i + 4, _mm_add_ps(_mm_sub_ps(_mm_add_ps(
+ _mm_mul_ps(t1, _mm_shuffle_ps(t3, t3, _MM_SHUFFLE(2,2,0,0))),
+ _mm_mul_ps(_mm_shuffle_ps(t1, t1, _MM_SHUFFLE(2,3,0,1)),
+ _mm_xor_ps(_mm_shuffle_ps(t3, t3, _MM_SHUFFLE(3,3,1,1)), c0))),
+ _mm_mul_ps(_mm_shuffle_ps(t2, t0, _MM_SHUFFLE(3,3,1,1)),
+ _mm_shuffle_ps(t4, t4, _MM_SHUFFLE(2,3,0,1)))),
+ _mm_mul_ps(_mm_shuffle_ps(t2, t0, _MM_SHUFFLE(2,2,0,0)),
+ _mm_xor_ps(t4, c0))));
+
+ t1 = _mm_load_ps(in + i + 8);
+ t2 = _mm_load_ps(in + N - i - 12);
+ t3 = _mm_load_ps(A + i + 8);
+ t4 = _mm_load_ps(B + i + 8);
+
+ _mm_store_ps(buf + i + 8, _mm_add_ps(_mm_sub_ps(_mm_add_ps(
+ _mm_mul_ps(t1, _mm_shuffle_ps(t3, t3, _MM_SHUFFLE(2,2,0,0))),
+ _mm_mul_ps(_mm_shuffle_ps(t1, t1, _MM_SHUFFLE(2,3,0,1)),
+ _mm_xor_ps(_mm_shuffle_ps(t3, t3, _MM_SHUFFLE(3,3,1,1)), c0))),
+ _mm_mul_ps(_mm_shuffle_ps(t0, t2, _MM_SHUFFLE(3,3,1,1)),
+ _mm_shuffle_ps(t4, t4, _MM_SHUFFLE(2,3,0,1)))),
+ _mm_mul_ps(_mm_shuffle_ps(t0, t2, _MM_SHUFFLE(2,2,0,0)),
+ _mm_xor_ps(t4, c0))));
+
+ t0 = _mm_load_ps(in + N - i - 16);
+ t1 = _mm_load_ps(in + i + 12);
+ t3 = _mm_load_ps(A + i + 12);
+ t4 = _mm_load_ps(B + i + 12);
+
+ _mm_store_ps(buf + i + 12, _mm_add_ps(_mm_sub_ps(_mm_add_ps(
+ _mm_mul_ps(t1, _mm_shuffle_ps(t3, t3, _MM_SHUFFLE(2,2,0,0))),
+ _mm_mul_ps(_mm_shuffle_ps(t1, t1, _MM_SHUFFLE(2,3,0,1)),
+ _mm_xor_ps(_mm_shuffle_ps(t3, t3, _MM_SHUFFLE(3,3,1,1)), c0))),
+ _mm_mul_ps(_mm_shuffle_ps(t2, t0, _MM_SHUFFLE(3,3,1,1)),
+ _mm_shuffle_ps(t4, t4, _MM_SHUFFLE(2,3,0,1)))),
+ _mm_mul_ps(_mm_shuffle_ps(t2, t0, _MM_SHUFFLE(2,2,0,0)),
+ _mm_xor_ps(t4, c0))));
+ }
+ }
#else
- for(i=0;i<N/2;i++) {
- buf[2*i] = in[2*i]*A[2*i] + in[2*i+1]*A[2*i+1] + in[N-2*i]*B[2*i] - in[N-2*i+1]*B[2*i+1];
- buf[2*i+1] = in[2*i+1]*A[2*i] - in[2*i]*A[2*i+1] - in[N-2*i]*B[2*i+1] - in[N-2*i+1]*B[2*i];
+ for (i = 0; i < N/2; i++) {
+ buf[2*i + 0] =
+ in[ 2*i + 0] * A[2*i + 0] + in[ 2*i + 1] * A[2*i + 1] +
+ in[N - 2*i + 0] * B[2*i + 0] - in[N - 2*i + 1] * B[2*i + 1];
+ buf[2*i + 1] =
+ in[ 2*i + 1] * A[2*i + 0] - in[ 2*i + 0] * A[2*i + 1] -
+ in[N - 2*i + 0] * B[2*i + 1] - in[N - 2*i + 1] * B[2*i + 0];
+ }
#endif
-}
-
- p->plans[0]->transform(p->plans[0], buf, out);
-
-}
-ffts_plan_t *ffts_init_1d_real(size_t N, int sign) {
- ffts_plan_t *p = malloc(sizeof(ffts_plan_t));
-
- if(sign < 0) p->transform = &ffts_execute_1d_real;
- else p->transform = &ffts_execute_1d_real_inv;
-
- p->destroy = &ffts_free_1d_real;
- p->N = N;
- p->rank = 1;
- p->plans = malloc(sizeof(ffts_plan_t **) * 1);
-
- p->plans[0] = ffts_init_1d(N/2, sign);
-
- p->buf = valloc(sizeof(float) * 2 * ((N/2) + 1));
-
- p->A = valloc(sizeof(float) * N);
- p->B = valloc(sizeof(float) * N);
-
- if(sign < 0) {
- int i;
- for (i = 0; i < N/2; i++) {
- p->A[2 * i] = 0.5 * (1.0 - sin (2.0f * PI / (double) (N) * (double) i));
- p->A[2 * i + 1] = 0.5 * (-1.0 * cos (2.0f * PI / (double) (N) * (double) i));
- p->B[2 * i] = 0.5 * (1.0 + sin (2.0f * PI / (double) (N) * (double) i));
- p->B[2 * i + 1] = 0.5 * (1.0 * cos (2.0f * PI / (double) (N) * (double) i));
- }
- }else{
- int i;
- for (i = 0; i < N/2; i++) {
- p->A[2 * i] = 1.0 * (1.0 - sin (2.0f * PI / (double) (N) * (double) i));
- p->A[2 * i + 1] = 1.0 * (-1.0 * cos (2.0f * PI / (double) (N) * (double) i));
- p->B[2 * i] = 1.0 * (1.0 + sin (2.0f * PI / (double) (N) * (double) i));
- p->B[2 * i + 1] = 1.0 * (1.0 * cos (2.0f * PI / (double) (N) * (double) i));
- }
- }
-
- return p;
+ p->plans[0]->transform(p->plans[0], buf, output);
}
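
On the plain-SSE path above, the file emulates SSE3's _mm_addsub_ps by XOR-ing a sign-mask constant (c0, loaded from sign_mask_odd) into one operand before adding or subtracting; the SSE3 path uses the fused instruction directly. A minimal sketch of the underlying equivalence, with names of my own choosing:

    #include <xmmintrin.h>

    /* addsub(a, b) = {a0-b0, a1+b1, a2-b2, a3+b3}: flip the sign bit of
     * the even lanes of b, then add. On SSE3 this is one _mm_addsub_ps. */
    static __m128 addsub_ps(__m128 a, __m128 b)
    {
        const __m128 sign_even = _mm_set_ps(0.0f, -0.0f, 0.0f, -0.0f);
        return _mm_add_ps(a, _mm_xor_ps(b, sign_even));
    }
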
+FFTS_API ffts_plan_t*
+ffts_init_1d_real(size_t N, int sign)
+{
+ ffts_plan_t *p;
+
+ p = (ffts_plan_t*) calloc(1, sizeof(*p) + sizeof(*p->plans));
+ if (!p) {
+ return NULL;
+ }
+
+ if (sign < 0) {
+ p->transform = &ffts_execute_1d_real;
+ } else {
+ p->transform = &ffts_execute_1d_real_inv;
+ }
+
+ p->destroy = &ffts_free_1d_real;
+ p->N = N;
+ p->rank = 1;
+ p->plans = (ffts_plan_t**) &p[1];
+
+ p->plans[0] = ffts_init_1d(N/2, sign);
+ if (!p->plans[0]) {
+ goto cleanup;
+ }
+
+ p->buf = ffts_aligned_malloc(2 * ((N/2) + 1) * sizeof(float));
+ if (!p->buf) {
+ goto cleanup;
+ }
+
+ p->A = (float*) ffts_aligned_malloc(N * sizeof(float));
+ if (!p->A) {
+ goto cleanup;
+ }
+
+ p->B = (float*) ffts_aligned_malloc(N * sizeof(float));
+ if (!p->B) {
+ goto cleanup;
+ }
+
+#ifdef HAVE_SSE3
+ ffts_generate_table_1d_real_32f(p, sign, 1);
+#else
+ ffts_generate_table_1d_real_32f(p, sign, 0);
+#endif
+
+ return p;
+cleanup:
+ ffts_free_1d_real(p);
+ return NULL;
+}
\ No newline at end of file
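
Note the allocation pattern in ffts_init_1d_real above: the plan header and its single-entry plans[] array come from one calloc, with p->plans aimed just past the struct, so one free (here via ffts_free_1d_real) releases both. A standalone sketch of the idiom, using hypothetical names:

    #include <stdlib.h>

    typedef struct plan plan_t;
    struct plan {
        plan_t **plans; /* points into the same allocation */
        /* ... remaining plan fields ... */
    };

    static plan_t *plan_alloc(size_t nsub)
    {
        plan_t *p = (plan_t*) calloc(1, sizeof(*p) + nsub * sizeof(*p->plans));
        if (!p) {
            return NULL;
        }
        p->plans = (plan_t**) &p[1]; /* array lives right after the header */
        return p;
    }
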
diff --git a/lib/ffts/src/ffts_real.h b/lib/ffts/src/ffts_real.h
index bf8834d..61d03d4 100644
--- a/lib/ffts/src/ffts_real.h
+++ b/lib/ffts/src/ffts_real.h
@@ -1,53 +1,47 @@
/*
-
- This file is part of FFTS -- The Fastest Fourier Transform in the South
-
- Copyright (c) 2012, Anthony M. Blake <amb@anthonix.com>
- Copyright (c) 2012, The University of Waikato
-
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions are met:
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in the
- documentation and/or other materials provided with the distribution.
- * Neither the name of the organization nor the
- names of its contributors may be used to endorse or promote products
- derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
- ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- DISCLAIMED. IN NO EVENT SHALL ANTHONY M. BLAKE BE LIABLE FOR ANY
- DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
-
-#ifndef __FFTS_REAL_H__
-#define __FFTS_REAL_H__
+This file is part of FFTS -- The Fastest Fourier Transform in the South
+
+Copyright (c) 2012, Anthony M. Blake <amb@anthonix.com>
+Copyright (c) 2012, The University of Waikato
+
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+* Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+* Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in the
+documentation and/or other materials provided with the distribution.
+* Neither the name of the organization nor the
+names of its contributors may be used to endorse or promote products
+derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL ANTHONY M. BLAKE BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#include <stdint.h>
-#include <stddef.h>
-#include <stdio.h>
+*/
-#include "ffts.h"
+#ifndef FFTS_REAL_H
+#define FFTS_REAL_H
-#ifdef HAVE_NEON
- #include <arm_neon.h>
-#endif
-#ifdef HAVE_SSE
- #include <xmmintrin.h>
+#if defined (_MSC_VER) && (_MSC_VER >= 1020)
+#pragma once
#endif
-ffts_plan_t *ffts_init_1d_real(size_t N, int sign);
+#include "ffts.h"
+#include <stddef.h>
-#endif
+ffts_plan_t*
+ffts_init_1d_real(size_t N, int sign);
+#endif /* FFTS_REAL_H */
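
For reference, a minimal caller of the interface declared above. This sketch assumes the public ffts_execute/ffts_free entry points from ffts.h, and uses POSIX posix_memalign because the SIMD paths load with aligned instructions. A length-N forward real transform (sign < 0) consumes N floats and produces N/2 + 1 complex bins:

    #include "ffts.h"
    #include <stdlib.h>

    int real_fft_example(void)
    {
        float *in, *out;
        ffts_plan_t *p;
        int i;

        if (posix_memalign((void**) &in, 32, 16 * sizeof(float)))
            return -1;
        if (posix_memalign((void**) &out, 32, 18 * sizeof(float))) {
            free(in);
            return -1;
        }

        for (i = 0; i < 16; i++)
            in[i] = (float) i;

        p = ffts_init_1d_real(16, -1); /* sign < 0: forward */
        if (p) {
            ffts_execute(p, in, out);  /* 9 complex bins = 18 floats */
            ffts_free(p);
        }

        free(in);
        free(out);
        return 0;
    }
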
diff --git a/lib/ffts/src/ffts_real_nd.c b/lib/ffts/src/ffts_real_nd.c
index bf46254..89ef7f7 100644
--- a/lib/ffts/src/ffts_real_nd.c
+++ b/lib/ffts/src/ffts_real_nd.c
@@ -1,177 +1,269 @@
/*
-
- This file is part of FFTS -- The Fastest Fourier Transform in the South
-
- Copyright (c) 2012, Anthony M. Blake <amb@anthonix.com>
- Copyright (c) 2012, The University of Waikato
-
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions are met:
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in the
- documentation and/or other materials provided with the distribution.
- * Neither the name of the organization nor the
- names of its contributors may be used to endorse or promote products
- derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
- ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- DISCLAIMED. IN NO EVENT SHALL ANTHONY M. BLAKE BE LIABLE FOR ANY
- DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+This file is part of FFTS -- The Fastest Fourier Transform in the South
+
+Copyright (c) 2012, Anthony M. Blake <amb@anthonix.com>
+Copyright (c) 2012, The University of Waikato
+
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+* Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+* Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in the
+documentation and/or other materials provided with the distribution.
+* Neither the name of the organization nor the
+names of its contributors may be used to endorse or promote products
+derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL ANTHONY M. BLAKE BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "ffts_real_nd.h"
+#include "ffts_real.h"
+#include "ffts_internal.h"
+#include "ffts_transpose.h"
-#ifdef __ARM_NEON__
-#include "neon.h"
-#endif
-
-void ffts_free_nd_real(ffts_plan_t *p) {
-
- int i;
- for(i=0;i<p->rank;i++) {
-
- ffts_plan_t *x = p->plans[i];
-
- int k;
- for(k=i+1;k<p->rank;k++) {
- if(x == p->plans[k]) p->plans[k] = NULL;
- }
-
- if(x) ffts_free(x);
- }
-
- free(p->Ns);
- free(p->Ms);
- free(p->plans);
- free(p->buf);
- free(p->transpose_buf);
- free(p);
-}
+static void
+ffts_free_nd_real(ffts_plan_t *p)
+{
+ if (p->plans) {
+ int i, j;
+
+ for (i = 0; i < p->rank; i++) {
+ ffts_plan_t *plan = p->plans[i];
+
+ if (plan) {
+ for (j = 0; j < i; j++) {
+ if (p->Ns[i] == p->Ns[j]) {
+ plan = NULL;
+ break;
+ }
+ }
+
+ if (plan) {
+ ffts_free(plan);
+ }
+ }
+ }
-void ffts_scalar_transpose(uint64_t *in, uint64_t *out, int w, int h, uint64_t *buf) {
+ free(p->plans);
+ }
- size_t i,j;
- for(i=0;i<w;i+=1) {
- for(j=0;j<h;j+=1) {
- out[i*h + j] = in[j*w + i];
- }
- }
+ if (p->buf) {
+ ffts_aligned_free(p->buf);
+ }
+ if (p->Ns) {
+ free(p->Ns);
+ }
+
+ if (p->Ms) {
+ free(p->Ms);
+ }
+
+ free(p);
}
-void ffts_execute_nd_real(ffts_plan_t *p, const void * in, void * out) {
+static void
+ffts_execute_nd_real(ffts_plan_t *p, const void *in, void *out)
+{
+ const size_t Ms0 = p->Ms[0];
+ const size_t Ns0 = p->Ns[0];
+
+ uint32_t *din = (uint32_t*) in;
+ uint64_t *buf = p->buf;
+ uint64_t *dout = (uint64_t*) out;
+
+ ffts_plan_t *plan;
+ int i;
+ size_t j;
+
+ plan = p->plans[0];
+ for (j = 0; j < Ns0; j++) {
+ plan->transform(plan, din + (j * Ms0), buf + (j * (Ms0 / 2 + 1)));
+ }
- uint32_t *din = (uint32_t *)in;
- uint64_t *buf = p->buf;
- uint64_t *dout = (uint64_t *)out;
+ ffts_transpose(buf, dout, Ms0 / 2 + 1, Ns0);
- size_t i,j;
- for(i=0;i<p->Ns[0];i++) {
- p->plans[0]->transform(p->plans[0], din + (i * p->Ms[0]), buf + (i * (p->Ms[0] / 2 + 1)));
- }
- ffts_scalar_transpose(buf, dout, p->Ms[0] / 2 + 1, p->Ns[0], p->transpose_buf);
+ for (i = 1; i < p->rank; i++) {
+ const size_t Ms = p->Ms[i];
+ const size_t Ns = p->Ns[i];
- for(i=1;i<p->rank;i++) {
- for(j=0;j<p->Ns[i];j++) {
- p->plans[i]->transform(p->plans[i], dout + (j * p->Ms[i]), buf + (j * p->Ms[i]));
- }
- ffts_scalar_transpose(buf, dout, p->Ms[i], p->Ns[i], p->transpose_buf);
- }
+ plan = p->plans[i];
+
+ for (j = 0; j < Ns; j++) {
+ plan->transform(plan, dout + (j * Ms), buf + (j * Ms));
+ }
+
+ ffts_transpose(buf, dout, Ms, Ns);
+ }
}
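
The forward ND path above alternates row transforms with full transposes. As a concrete walkthrough (an illustrative example, not from the patch), a forward 2D real transform of an 8 x 8 array proceeds as:

    pass 0: 8 real rows of length 8     ->  8 x 5 complex  (Ms0/2 + 1 = 5)
    transpose                           ->  5 x 8 complex
    pass 1: 5 complex rows of length 8  ->  5 x 8 complex
    transpose                           ->  8 x 5 complex output
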
-void ffts_execute_nd_real_inv(ffts_plan_t *p, const void * in, void * out) {
-
- uint64_t *din = (uint64_t *)in;
- uint64_t *buf = p->buf;
- uint64_t *dout = (uint64_t *)out;
-
- float *bufr = (float *)(p->buf);
- float *doutr = (float *)out;
-
- size_t i,j;
- ffts_scalar_transpose(din, buf, p->Ms[0], p->Ns[0], p->transpose_buf);
-
- for(i=0;i<p->Ms[0];i++) {
- p->plans[0]->transform(p->plans[0], buf + (i * p->Ns[0]), dout + (i * p->Ns[0]));
- }
-
- ffts_scalar_transpose(dout, buf, p->Ns[0], p->Ms[0], p->transpose_buf);
- for(j=0;j<p->Ms[1];j++) {
- p->plans[1]->transform(p->plans[1], buf + (j * (p->Ms[0])), &doutr[j * p->Ns[1]]);
- }
+static void
+ffts_execute_nd_real_inv(ffts_plan_t *p, const void *in, void *out)
+{
+ const size_t Ms0 = p->Ms[0];
+ const size_t Ms1 = p->Ms[1];
+ const size_t Ns0 = p->Ns[0];
+ const size_t Ns1 = p->Ns[1];
+
+ uint64_t *din = (uint64_t*) in;
+ uint64_t *buf = p->buf;
+ uint64_t *buf2;
+ float *doutr = (float*) out;
+
+ ffts_plan_t *plan;
+ size_t vol;
+
+ int i;
+ size_t j;
+
+ vol = p->Ns[0];
+ for (i = 1; i < p->rank; i++) {
+ vol *= p->Ns[i];
+ }
+
+ buf2 = buf + vol;
+
+ ffts_transpose(din, buf, Ms0, Ns0);
+
+ plan = p->plans[0];
+ for (j = 0; j < Ms0; j++) {
+ plan->transform(plan, buf + (j * Ns0), buf2 + (j * Ns0));
+ }
+
+ ffts_transpose(buf2, buf, Ns0, Ms0);
+
+ plan = p->plans[1];
+ for (j = 0; j < Ms1; j++) {
+ plan->transform(plan, buf + (j * Ms0), &doutr[j * Ns1]);
+ }
}
-ffts_plan_t *ffts_init_nd_real(int rank, size_t *Ns, int sign) {
- size_t vol = 1;
+FFTS_API ffts_plan_t*
+ffts_init_nd_real(int rank, size_t *Ns, int sign)
+{
+ int i;
+ size_t vol = 1;
+ size_t bufsize;
+ ffts_plan_t *p;
- ffts_plan_t *p = malloc(sizeof(ffts_plan_t));
+ p = (ffts_plan_t*) calloc(1, sizeof(*p));
+ if (!p) {
+ return NULL;
+ }
- if(sign < 0) p->transform = &ffts_execute_nd_real;
- else p->transform = &ffts_execute_nd_real_inv;
+ if (sign < 0) {
+ p->transform = &ffts_execute_nd_real;
+ } else {
+ p->transform = &ffts_execute_nd_real_inv;
+ }
- p->destroy = &ffts_free_nd_real;
+ p->destroy = &ffts_free_nd_real;
+ p->rank = rank;
- p->rank = rank;
- p->Ns = malloc(sizeof(size_t) * rank);
- p->Ms = malloc(sizeof(size_t) * rank);
- p->plans = malloc(sizeof(ffts_plan_t **) * rank);
- int i;
- for(i=0;i<rank;i++) {
- p->Ns[i] = Ns[i];
- vol *= Ns[i];
- }
- p->buf = valloc(sizeof(float) * 2 * vol);
+ p->Ms = (size_t*) malloc(rank * sizeof(*p->Ms));
+ if (!p->Ms) {
+ goto cleanup;
+ }
- for(i=0;i<rank;i++) {
- p->Ms[i] = vol / p->Ns[i];
-
- p->plans[i] = NULL;
- int k;
+ p->Ns = (size_t*) malloc(rank * sizeof(*p->Ns));
+ if (!p->Ns) {
+ goto cleanup;
+ }
- if(sign < 0) {
- for(k=1;k<i;k++) {
- if(p->Ms[k] == p->Ms[i]) p->plans[i] = p->plans[k];
- }
- if(!i) p->plans[i] = ffts_init_1d_real(p->Ms[i], sign);
- else if(!p->plans[i]) p->plans[i] = ffts_init_1d(p->Ms[i], sign);
- }else{
- for(k=0;k<i;k++) {
- if(p->Ns[k] == p->Ns[i]) p->plans[i] = p->plans[k];
- }
- if(i==rank-1) p->plans[i] = ffts_init_1d_real(p->Ns[i], sign);
- else if(!p->plans[i]) p->plans[i] = ffts_init_1d(p->Ns[i], sign);
- }
- }
- if(sign < 0) {
- for(i=1;i<rank;i++) {
- p->Ns[i] = p->Ns[i] / 2 + 1;
- }
- }else{
- for(i=0;i<rank-1;i++) {
- p->Ms[i] = p->Ms[i] / 2 + 1;
- }
- }
-
- p->transpose_buf = valloc(sizeof(float) * 2 * 8 * 8);
- return p;
+ for (i = 0; i < rank; i++) {
+ p->Ns[i] = Ns[i];
+ vol *= Ns[i];
+ }
+
+ /* there is probably a prettier way of doing this, but it works */
+ if (sign < 0) {
+ bufsize = 2 * vol;
+ } else {
+ bufsize = 2 * (Ns[0] * ((vol / Ns[0]) / 2 + 1) + vol);
+ }
+
+ p->buf = ffts_aligned_malloc(bufsize * sizeof(float));
+ if (!p->buf) {
+ goto cleanup;
+ }
+
+ p->plans = (ffts_plan_t**) calloc(rank, sizeof(*p->plans));
+ if (!p->plans) {
+ goto cleanup;
+ }
+
+ for (i = 0; i < rank; i++) {
+ int k;
+
+ p->Ms[i] = vol / p->Ns[i];
+
+ if (sign < 0) {
+ if (!i) {
+ p->plans[i] = ffts_init_1d_real(p->Ms[i], sign);
+ } else {
+ for (k = 1; k < i; k++) {
+ if (p->Ms[k] == p->Ms[i]) {
+ p->plans[i] = p->plans[k];
+ break;
+ }
+ }
+
+ if (!p->plans[i]) {
+ p->plans[i] = ffts_init_1d(p->Ms[i], sign);
+ p->Ns[i] = p->Ns[i] / 2 + 1;
+ }
+ }
+ } else {
+ if (i == rank - 1) {
+ p->plans[i] = ffts_init_1d_real(p->Ns[i], sign);
+ } else {
+ for (k = 0; k < i; k++) {
+ if (p->Ns[k] == p->Ns[i]) {
+ p->plans[i] = p->plans[k];
+ break;
+ }
+ }
+
+ if (!p->plans[i]) {
+ p->plans[i] = ffts_init_1d(p->Ns[i], sign);
+ p->Ms[i] = p->Ms[i] / 2 + 1;
+ }
+ }
+ }
+
+ if (!p->plans[i]) {
+ goto cleanup;
+ }
+ }
+
+ return p;
+
+cleanup:
+ ffts_free_nd_real(p);
+ return NULL;
}
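
The inverse buffer really is larger because ffts_execute_nd_real_inv keeps two live scratch regions (buf and buf2 = buf + vol). As a worked example with Ns = {8, 8}: vol = 64, so the forward plan allocates 2 * 64 = 128 floats, while the inverse allocates 2 * (Ns[0] * ((vol/Ns[0])/2 + 1) + vol) = 2 * (8 * 5 + 64) = 208 floats.
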
+FFTS_API ffts_plan_t*
+ffts_init_2d_real(size_t N1, size_t N2, int sign)
+{
+ size_t Ns[2];
-ffts_plan_t *ffts_init_2d_real(size_t N1, size_t N2, int sign) {
- size_t Ns[2];
- Ns[0] = N1;
- Ns[1] = N2;
- return ffts_init_nd_real(2, Ns, sign);
+ Ns[0] = N1;
+ Ns[1] = N2;
+ return ffts_init_nd_real(2, Ns, sign);
}
diff --git a/lib/ffts/src/ffts_real_nd.h b/lib/ffts/src/ffts_real_nd.h
index d777d42..fac607b 100644
--- a/lib/ffts/src/ffts_real_nd.h
+++ b/lib/ffts/src/ffts_real_nd.h
@@ -1,53 +1,50 @@
/*
-
- This file is part of FFTS -- The Fastest Fourier Transform in the South
-
- Copyright (c) 2012, Anthony M. Blake <amb@anthonix.com>
- Copyright (c) 2012, The University of Waikato
-
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions are met:
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in the
- documentation and/or other materials provided with the distribution.
- * Neither the name of the organization nor the
- names of its contributors may be used to endorse or promote products
- derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
- ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- DISCLAIMED. IN NO EVENT SHALL ANTHONY M. BLAKE BE LIABLE FOR ANY
- DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+This file is part of FFTS -- The Fastest Fourier Transform in the South
+
+Copyright (c) 2012, Anthony M. Blake <amb@anthonix.com>
+Copyright (c) 2012, The University of Waikato
+
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+* Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+* Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in the
+documentation and/or other materials provided with the distribution.
+* Neither the name of the organization nor the
+names of its contributors may be used to endorse or promote products
+derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL ANTHONY M. BLAKE BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef __FFTS_REAL_ND_H__
-#define __FFTS_REAL_ND_H__
+#ifndef FFTS_REAL_ND_H
+#define FFTS_REAL_ND_H
-#include <stdint.h>
-#include <stddef.h>
-#include <stdio.h>
+#if defined (_MSC_VER) && (_MSC_VER >= 1020)
+#pragma once
+#endif
-#include "ffts_nd.h"
-#include "ffts_real.h"
#include "ffts.h"
+#include <stddef.h>
-#ifdef HAVE_NEON
- #include <arm_neon.h>
-#endif
-#ifdef HAVE_SSE
- #include <xmmintrin.h>
-#endif
+ffts_plan_t*
+ffts_init_nd_real(int rank, size_t *Ns, int sign);
-#endif
+ffts_plan_t*
+ffts_init_2d_real(size_t N1, size_t N2, int sign);
+#endif /* FFTS_REAL_ND_H */
\ No newline at end of file
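
A matching usage sketch for the 2D entry point declared above, under the same assumptions as the earlier 1D example (public ffts_execute/ffts_free, aligned buffers). A forward N1 x N2 real transform reads N1*N2 floats and writes N1*(N2/2 + 1) complex values:

    #include "ffts.h"
    #include <stdlib.h>

    int real_fft2d_example(void)
    {
        float *in, *out;
        ffts_plan_t *p;
        int i;

        if (posix_memalign((void**) &in, 32, 64 * sizeof(float)))
            return -1;
        if (posix_memalign((void**) &out, 32, 80 * sizeof(float))) {
            free(in);
            return -1;
        }

        for (i = 0; i < 64; i++)
            in[i] = (float) i;

        p = ffts_init_2d_real(8, 8, -1); /* forward: sign < 0 */
        if (p) {
            ffts_execute(p, in, out);    /* 8 x 5 complex = 80 floats */
            ffts_free(p);
        }

        free(in);
        free(out);
        return 0;
    }
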
diff --git a/lib/ffts/src/ffts_small.c b/lib/ffts/src/ffts_small.c
deleted file mode 100644
index ddd2d3e..0000000
--- a/lib/ffts/src/ffts_small.c
+++ /dev/null
@@ -1,156 +0,0 @@
-/*
-
- This file is part of FFTS -- The Fastest Fourier Transform in the South
-
- Copyright (c) 2013, Michael J. Cree <mcree@orcon.net.nz>
- Copyright (c) 2012, 2013, Anthony M. Blake <amb@anthonix.com>
-
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions are met:
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in the
- documentation and/or other materials provided with the distribution.
- * Neither the name of the organization nor the
- names of its contributors may be used to endorse or promote products
- derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
- ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- DISCLAIMED. IN NO EVENT SHALL ANTHONY M. BLAKE BE LIABLE FOR ANY
- DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-*/
-
-#include "ffts.h"
-#include "macros.h"
-
-#include <stdlib.h>
-
-#define DEBUG(x)
-
-#include "ffts_small.h"
-
- void firstpass_16_f(ffts_plan_t * p, const void * in, void * out)
-{
- const data_t *din = (const data_t *)in;
- data_t *dout = (data_t *)out;
- V r0_1,r2_3,r4_5,r6_7,r8_9,r10_11,r12_13,r14_15;
- float *LUT8 = p->ws;
-
- L_4_4(0, din+0,din+16,din+8,din+24,&r0_1,&r2_3,&r8_9,&r10_11);
- L_2_4(0, din+4,din+20,din+28,din+12,&r4_5,&r6_7,&r14_15,&r12_13);
- K_N(0, VLD(LUT8),VLD(LUT8+4),&r0_1,&r2_3,&r4_5,&r6_7);
- K_N(0, VLD(LUT8+8),VLD(LUT8+12),&r0_1,&r4_5,&r8_9,&r12_13);
- S_4(r0_1,r4_5,r8_9,r12_13,dout+0,dout+8,dout+16,dout+24);
- K_N(0, VLD(LUT8+16),VLD(LUT8+20),&r2_3,&r6_7,&r10_11,&r14_15);
- S_4(r2_3,r6_7,r10_11,r14_15,dout+4,dout+12,dout+20,dout+28);
-}
-
- void firstpass_16_b(ffts_plan_t * p, const void * in, void * out)
-{
- const data_t *din = (const data_t *)in;
- data_t *dout = (data_t *)out;
- V r0_1,r2_3,r4_5,r6_7,r8_9,r10_11,r12_13,r14_15;
- float *LUT8 = p->ws;
-
- L_4_4(1, din+0,din+16,din+8,din+24,&r0_1,&r2_3,&r8_9,&r10_11);
- L_2_4(1, din+4,din+20,din+28,din+12,&r4_5,&r6_7,&r14_15,&r12_13);
- K_N(1, VLD(LUT8),VLD(LUT8+4),&r0_1,&r2_3,&r4_5,&r6_7);
- K_N(1, VLD(LUT8+8),VLD(LUT8+12),&r0_1,&r4_5,&r8_9,&r12_13);
- S_4(r0_1,r4_5,r8_9,r12_13,dout+0,dout+8,dout+16,dout+24);
- K_N(1, VLD(LUT8+16),VLD(LUT8+20),&r2_3,&r6_7,&r10_11,&r14_15);
- S_4(r2_3,r6_7,r10_11,r14_15,dout+4,dout+12,dout+20,dout+28);
-}
-
-
- void firstpass_8_f(ffts_plan_t *p, const void *in, void *out)
-{
- const data_t *din = (const data_t *)in;
- data_t *dout = (data_t *)out;
- V r0_1, r2_3, r4_5, r6_7;
- float *LUT8 = p->ws + p->ws_is[0];
-
- L_4_2(0, din, din+8, din+4, din+12, &r0_1, &r2_3, &r4_5, &r6_7);
- K_N(0, VLD(LUT8), VLD(LUT8+4), &r0_1, &r2_3, &r4_5, &r6_7);
- S_4(r0_1,r2_3,r4_5,r6_7,dout+0,dout+4,dout+8,dout+12);
-}
-
- void firstpass_8_b(ffts_plan_t *p, const void *in, void *out)
-{
- const data_t *din = (const data_t *)in;
- data_t *dout = (data_t *)out;
- V r0_1, r2_3, r4_5, r6_7;
- float *LUT8 = p->ws + p->ws_is[0];
-
- L_4_2(1, din, din+8, din+4, din+12, &r0_1, &r2_3, &r4_5, &r6_7);
- K_N(1, VLD(LUT8), VLD(LUT8+4), &r0_1, &r2_3, &r4_5, &r6_7);
- S_4(r0_1,r2_3,r4_5,r6_7,dout+0,dout+4,dout+8,dout+12);
-}
-
-
- void firstpass_4_f(ffts_plan_t *p, const void *in, void *out)
-{
- const data_t *din = (const data_t *)in;
- data_t *dout = (data_t *)out;
- cdata_t t0, t1, t2, t3, t4, t5, t6, t7;
- t0[0] = din[0]; t0[1] = din[1];
- t1[0] = din[4]; t1[1] = din[5];
- t2[0] = din[2]; t2[1] = din[3];
- t3[0] = din[6]; t3[1] = din[7];
-
- t4[0] = t0[0] + t1[0]; t4[1] = t0[1] + t1[1];
- t5[0] = t0[0] - t1[0]; t5[1] = t0[1] - t1[1];
- t6[0] = t2[0] + t3[0]; t6[1] = t2[1] + t3[1];
- t7[0] = t2[0] - t3[0]; t7[1] = t2[1] - t3[1];
-
- dout[0] = t4[0] + t6[0]; dout[1] = t4[1] + t6[1];
- dout[4] = t4[0] - t6[0]; dout[5] = t4[1] - t6[1];
- dout[2] = t5[0] + t7[1]; dout[3] = t5[1] - t7[0];
- dout[6] = t5[0] - t7[1]; dout[7] = t5[1] + t7[0];
-}
-
- void firstpass_4_b(ffts_plan_t *p, const void *in, void *out)
-{
- const data_t *din = (const data_t *)in;
- data_t *dout = (data_t *)out;
- cdata_t t0, t1, t2, t3, t4, t5, t6, t7;
- t0[0] = din[0]; t0[1] = din[1];
- t1[0] = din[4]; t1[1] = din[5];
- t2[0] = din[2]; t2[1] = din[3];
- t3[0] = din[6]; t3[1] = din[7];
-
- t4[0] = t0[0] + t1[0]; t4[1] = t0[1] + t1[1];
- t5[0] = t0[0] - t1[0]; t5[1] = t0[1] - t1[1];
- t6[0] = t2[0] + t3[0]; t6[1] = t2[1] + t3[1];
- t7[0] = t2[0] - t3[0]; t7[1] = t2[1] - t3[1];
-
- dout[0] = t4[0] + t6[0]; dout[1] = t4[1] + t6[1];
- dout[4] = t4[0] - t6[0]; dout[5] = t4[1] - t6[1];
- dout[2] = t5[0] - t7[1]; dout[3] = t5[1] + t7[0];
- dout[6] = t5[0] + t7[1]; dout[7] = t5[1] - t7[0];
-}
-
- void firstpass_2(ffts_plan_t *p, const void *in, void *out)
-{
- const data_t *din = (const data_t *)in;
- data_t *dout = (data_t *)out;
- cdata_t t0, t1, r0,r1;
- t0[0] = din[0]; t0[1] = din[1];
- t1[0] = din[2]; t1[1] = din[3];
- r0[0] = t0[0] + t1[0];
- r0[1] = t0[1] + t1[1];
- r1[0] = t0[0] - t1[0];
- r1[1] = t0[1] - t1[1];
- dout[0] = r0[0]; dout[1] = r0[1];
- dout[2] = r1[0]; dout[3] = r1[1];
-}
diff --git a/lib/ffts/src/ffts_small.h b/lib/ffts/src/ffts_small.h
deleted file mode 100644
index 76cadf5..0000000
--- a/lib/ffts/src/ffts_small.h
+++ /dev/null
@@ -1,13 +0,0 @@
-#ifndef __FFTS_SMALL_H__
-#define __FFTS_SMALL_H__
-
-
-void firstpass_16_f(ffts_plan_t * p, const void * in, void * out);
-void firstpass_16_b(ffts_plan_t * p, const void * in, void * out);
-void firstpass_8_f(ffts_plan_t * p, const void * in, void * out);
-void firstpass_8_b(ffts_plan_t * p, const void * in, void * out);
-void firstpass_4_f(ffts_plan_t * p, const void * in, void * out);
-void firstpass_4_b(ffts_plan_t * p, const void * in, void * out);
-void firstpass_2(ffts_plan_t * p, const void * in, void * out);
-
-#endif
diff --git a/lib/ffts/src/ffts_static.c b/lib/ffts/src/ffts_static.c
index 3edf2ea..09de6d7 100644
--- a/lib/ffts/src/ffts_static.c
+++ b/lib/ffts/src/ffts_static.c
@@ -1,101 +1,1234 @@
/*
-
- This file is part of FFTS -- The Fastest Fourier Transform in the South
-
- Copyright (c) 2012, Anthony M. Blake <amb@anthonix.com>
- Copyright (c) 2012, The University of Waikato
-
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions are met:
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in the
- documentation and/or other materials provided with the distribution.
- * Neither the name of the organization nor the
- names of its contributors may be used to endorse or promote products
- derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
- ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- DISCLAIMED. IN NO EVENT SHALL ANTHONY M. BLAKE BE LIABLE FOR ANY
- DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+This file is part of FFTS -- The Fastest Fourier Transform in the South
+
+Copyright (c) 2012, Anthony M. Blake <amb@anthonix.com>
+Copyright (c) 2012, The University of Waikato
+
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+* Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+* Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in the
+documentation and/or other materials provided with the distribution.
+* Neither the name of the organization nor the
+names of its contributors may be used to endorse or promote products
+derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL ANTHONY M. BLAKE BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
+
#include "ffts_static.h"
-void ffts_static_rec_i(ffts_plan_t *p, float *data, size_t N) {
- if(N > 16) {
- size_t N1 = N >> 1;
- size_t N2 = N >> 2;
- size_t N3 = N >> 3;
- float *ws = ((float *)(p->ws)) + (p->ws_is[__builtin_ctzl(N)-4] << 1);
+#include "ffts_internal.h"
+#include "macros.h"
+
+#if defined(HAVE_NEON)
+#include "neon.h"
+#endif
+
+#include <assert.h>
+
+static const FFTS_ALIGN(16) float ffts_constants_small_32f[24] = {
+ 1.0f,
+ 1.0f,
+ 0.7071067811865475244008443621048490392848359376884740f,
+ 0.7071067811865475244008443621048490392848359376884740f,
+
+ -0.0f,
+ 0.0f,
+ -0.7071067811865475244008443621048490392848359376884740f,
+ 0.7071067811865475244008443621048490392848359376884740f,
+
+ 1.0f,
+ 1.0f,
+ 0.9238795325112867561281831893967882868224166258636425f,
+ 0.9238795325112867561281831893967882868224166258636425f,
+
+ -0.0f,
+ 0.0f,
+ -0.3826834323650897717284599840303988667613445624856270f,
+ 0.3826834323650897717284599840303988667613445624856270f,
+
+ 0.7071067811865475244008443621048490392848359376884740f,
+ 0.7071067811865475244008443621048490392848359376884740f,
+ 0.3826834323650897717284599840303988667613445624856270f,
+ 0.3826834323650897717284599840303988667613445624856270f,
+
+ -0.7071067811865475244008443621048490392848359376884740f,
+ 0.7071067811865475244008443621048490392848359376884740f,
+ -0.9238795325112867561281831893967882868224166258636425f,
+ 0.9238795325112867561281831893967882868224166258636425f
+};
+
+static const FFTS_ALIGN(16) double ffts_constants_small_64f[24] = {
+ 1.0,
+ 1.0,
+ 0.7071067811865475244008443621048490392848359376884740,
+ 0.7071067811865475244008443621048490392848359376884740,
+
+ -0.0,
+ 0.0,
+ -0.7071067811865475244008443621048490392848359376884740,
+ 0.7071067811865475244008443621048490392848359376884740,
+
+ 1.0,
+ 1.0,
+ 0.9238795325112867561281831893967882868224166258636425,
+ 0.9238795325112867561281831893967882868224166258636425,
+
+ -0.0,
+ 0.0,
+ -0.3826834323650897717284599840303988667613445624856270,
+ 0.3826834323650897717284599840303988667613445624856270,
+
+ 0.7071067811865475244008443621048490392848359376884740,
+ 0.7071067811865475244008443621048490392848359376884740,
+ 0.3826834323650897717284599840303988667613445624856270,
+ 0.3826834323650897717284599840303988667613445624856270,
+
+ -0.7071067811865475244008443621048490392848359376884740,
+ 0.7071067811865475244008443621048490392848359376884740,
+ -0.9238795325112867561281831893967882868224166258636425,
+ 0.9238795325112867561281831893967882868224166258636425
+};
+
+static const FFTS_ALIGN(16) float ffts_constants_small_inv_32f[24] = {
+ 1.0f,
+ 1.0f,
+ 0.7071067811865475244008443621048490392848359376884740f,
+ 0.7071067811865475244008443621048490392848359376884740f,
+
+ 0.0f,
+ -0.0f,
+ 0.7071067811865475244008443621048490392848359376884740f,
+ -0.7071067811865475244008443621048490392848359376884740f,
+
+ 1.0f,
+ 1.0f,
+ 0.9238795325112867561281831893967882868224166258636425f,
+ 0.9238795325112867561281831893967882868224166258636425f,
+
+ 0.0f,
+ -0.0f,
+ 0.3826834323650897717284599840303988667613445624856270f,
+ -0.3826834323650897717284599840303988667613445624856270f,
+
+ 0.7071067811865475244008443621048490392848359376884740f,
+ 0.7071067811865475244008443621048490392848359376884740f,
+ 0.3826834323650897717284599840303988667613445624856270f,
+ 0.3826834323650897717284599840303988667613445624856270f,
+
+ 0.7071067811865475244008443621048490392848359376884740f,
+ -0.7071067811865475244008443621048490392848359376884740f,
+ 0.9238795325112867561281831893967882868224166258636425f,
+ -0.9238795325112867561281831893967882868224166258636425f
+};
+
+static const FFTS_ALIGN(16) double ffts_constants_small_inv_64f[24] = {
+ 1.0,
+ 1.0,
+ 0.7071067811865475244008443621048490392848359376884740,
+ 0.7071067811865475244008443621048490392848359376884740,
+
+ 0.0,
+ -0.0,
+ 0.7071067811865475244008443621048490392848359376884740,
+ -0.7071067811865475244008443621048490392848359376884740,
+
+ 1.0,
+ 1.0,
+ 0.9238795325112867561281831893967882868224166258636425,
+ 0.9238795325112867561281831893967882868224166258636425,
+
+ 0.0,
+ -0.0,
+ 0.3826834323650897717284599840303988667613445624856270,
+ -0.3826834323650897717284599840303988667613445624856270,
+
+ 0.7071067811865475244008443621048490392848359376884740,
+ 0.7071067811865475244008443621048490392848359376884740,
+ 0.3826834323650897717284599840303988667613445624856270,
+ 0.3826834323650897717284599840303988667613445624856270,
+
+ 0.7071067811865475244008443621048490392848359376884740,
+ -0.7071067811865475244008443621048490392848359376884740,
+ 0.9238795325112867561281831893967882868224166258636425,
+ -0.9238795325112867561281831893967882868224166258636425
+};
+
+static const FFTS_ALIGN(16) float ffts_constants_32f[16] = {
+ 0.7071067811865475244008443621048490392848359376884740f,
+ 0.7071067811865475244008443621048490392848359376884740f,
+ 0.7071067811865475244008443621048490392848359376884740f,
+ 0.7071067811865475244008443621048490392848359376884740f,
+
+ -0.7071067811865475244008443621048490392848359376884740f,
+ 0.7071067811865475244008443621048490392848359376884740f,
+ -0.7071067811865475244008443621048490392848359376884740f,
+ 0.7071067811865475244008443621048490392848359376884740f,
+
+ 1.0f,
+ 1.0f,
+ 0.7071067811865475244008443621048490392848359376884740f,
+ 0.7071067811865475244008443621048490392848359376884740f,
+
+ 0.0f,
+ 0.0f,
+ -0.7071067811865475244008443621048490392848359376884740f,
+ 0.7071067811865475244008443621048490392848359376884740f
+};
+
+static const FFTS_ALIGN(16) double ffts_constants_64f[16] = {
+ 0.7071067811865475244008443621048490392848359376884740,
+ 0.7071067811865475244008443621048490392848359376884740,
+ 0.7071067811865475244008443621048490392848359376884740,
+ 0.7071067811865475244008443621048490392848359376884740,
+
+ -0.7071067811865475244008443621048490392848359376884740,
+ 0.7071067811865475244008443621048490392848359376884740,
+ -0.7071067811865475244008443621048490392848359376884740,
+ 0.7071067811865475244008443621048490392848359376884740,
+
+ 1.0,
+ 1.0,
+ 0.7071067811865475244008443621048490392848359376884740,
+ 0.7071067811865475244008443621048490392848359376884740,
+
+ 0.0,
+ 0.0,
+ -0.7071067811865475244008443621048490392848359376884740,
+ 0.7071067811865475244008443621048490392848359376884740
+};
+
+static const FFTS_ALIGN(16) float ffts_constants_inv_32f[16] = {
+ 0.7071067811865475244008443621048490392848359376884740f,
+ 0.7071067811865475244008443621048490392848359376884740f,
+ 0.7071067811865475244008443621048490392848359376884740f,
+ 0.7071067811865475244008443621048490392848359376884740f,
+
+ 0.7071067811865475244008443621048490392848359376884740f,
+ -0.7071067811865475244008443621048490392848359376884740f,
+ 0.7071067811865475244008443621048490392848359376884740f,
+ -0.7071067811865475244008443621048490392848359376884740f,
+
+ 1.0f,
+ 1.0f,
+ 0.7071067811865475244008443621048490392848359376884740f,
+ 0.7071067811865475244008443621048490392848359376884740f,
+
+ 0.0f,
+ 0.0f,
+ 0.7071067811865475244008443621048490392848359376884740f,
+ -0.7071067811865475244008443621048490392848359376884740f
+};
+
+static const FFTS_ALIGN(16) double ffts_constants_inv_64f[16] = {
+ 0.7071067811865475244008443621048490392848359376884740,
+ 0.7071067811865475244008443621048490392848359376884740,
+ 0.7071067811865475244008443621048490392848359376884740,
+ 0.7071067811865475244008443621048490392848359376884740,
+
+ 0.7071067811865475244008443621048490392848359376884740,
+ -0.7071067811865475244008443621048490392848359376884740,
+ 0.7071067811865475244008443621048490392848359376884740,
+ -0.7071067811865475244008443621048490392848359376884740,
+
+ 1.0,
+ 1.0,
+ 0.7071067811865475244008443621048490392848359376884740,
+ 0.7071067811865475244008443621048490392848359376884740,
+
+ 0.0,
+ 0.0,
+ 0.7071067811865475244008443621048490392848359376884740,
+ -0.7071067811865475244008443621048490392848359376884740
+};
+
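
The long decimal literals above are just +/-cos and sin of pi/4 and pi/8 (the w_8 and w_16 twiddle factors) duplicated across SIMD lanes, with signs flipped between the forward and inverse tables. A quick check:

    #include <math.h>
    #include <stdio.h>

    #ifndef M_PI
    #define M_PI 3.14159265358979323846
    #endif

    int main(void)
    {
        printf("%.16f\n", cos(M_PI / 4)); /* 0.7071067811865476 */
        printf("%.16f\n", cos(M_PI / 8)); /* 0.9238795325112867 */
        printf("%.16f\n", sin(M_PI / 8)); /* 0.3826834323650898 */
        return 0;
    }
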
+static FFTS_INLINE void
+V4SF_K_0(int inv,
+ V4SF *r0,
+ V4SF *r1,
+ V4SF *r2,
+ V4SF *r3)
+{
+ V4SF t0, t1, t2, t3;
+
+ t0 = *r0;
+ t1 = *r1;
+
+ t2 = V4SF_ADD(*r2, *r3);
+ t3 = V4SF_IMULI(inv, V4SF_SUB(*r2, *r3));
+
+ *r0 = V4SF_ADD(t0, t2);
+ *r2 = V4SF_SUB(t0, t2);
+ *r1 = V4SF_SUB(t1, t3);
+ *r3 = V4SF_ADD(t1, t3);
+}
+
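
V4SF_K_0 above is a radix-4 butterfly with unit twiddle factors; V4SF_IMULI is multiplication by +/-i with the sign selected by the direction flag. A scalar sketch of the same dataflow, one complex value per variable (the -i-on-forward convention here is my assumption, matching a decimation-in-time forward FFT; the exact sign lives in macros.h):

    #include <complex.h>

    static void k0_scalar(int inv, float complex *r0, float complex *r1,
                          float complex *r2, float complex *r3)
    {
        float complex t0 = *r0;
        float complex t1 = *r1;
        float complex t2 = *r2 + *r3;
        float complex t3 = (inv ? I : -I) * (*r2 - *r3); /* IMULI */

        *r0 = t0 + t2;
        *r2 = t0 - t2;
        *r1 = t1 - t3;
        *r3 = t1 + t3;
    }
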
+static FFTS_INLINE void
+V4SF_L_2(const float *FFTS_RESTRICT i0,
+ const float *FFTS_RESTRICT i1,
+ const float *FFTS_RESTRICT i2,
+ const float *FFTS_RESTRICT i3,
+ V4SF *r0,
+ V4SF *r1,
+ V4SF *r2,
+ V4SF *r3)
+{
+ V4SF t0, t1, t2, t3;
+
+ t0 = V4SF_LD(i0);
+ t1 = V4SF_LD(i1);
+ t2 = V4SF_LD(i2);
+ t3 = V4SF_LD(i3);
+
+ *r0 = V4SF_ADD(t0, t1);
+ *r1 = V4SF_SUB(t0, t1);
+ *r2 = V4SF_ADD(t2, t3);
+ *r3 = V4SF_SUB(t2, t3);
+}
+
+static FFTS_INLINE void
+V4SF_L_4(int inv,
+ const float *FFTS_RESTRICT i0,
+ const float *FFTS_RESTRICT i1,
+ const float *FFTS_RESTRICT i2,
+ const float *FFTS_RESTRICT i3,
+ V4SF *r0,
+ V4SF *r1,
+ V4SF *r2,
+ V4SF *r3)
+{
+ V4SF t0, t1, t2, t3, t4, t5, t6, t7;
+
+ t0 = V4SF_LD(i0);
+ t1 = V4SF_LD(i1);
+ t2 = V4SF_LD(i2);
+ t3 = V4SF_LD(i3);
+
+ t4 = V4SF_ADD(t0, t1);
+ t5 = V4SF_SUB(t0, t1);
+ t6 = V4SF_ADD(t2, t3);
+ t7 = V4SF_IMULI(inv, V4SF_SUB(t2, t3));
+
+ *r0 = V4SF_ADD(t4, t6);
+ *r2 = V4SF_SUB(t4, t6);
+ *r1 = V4SF_SUB(t5, t7);
+ *r3 = V4SF_ADD(t5, t7);
+}
+
+static FFTS_INLINE void
+V4SF_LEAF_EE(float *const FFTS_RESTRICT out,
+ const ptrdiff_t *FFTS_RESTRICT os,
+ const float *FFTS_RESTRICT in,
+ const ptrdiff_t *FFTS_RESTRICT is,
+ int inv)
+{
+ const float *FFTS_RESTRICT LUT = inv ? ffts_constants_inv_32f : ffts_constants_32f;
- ffts_static_rec_i(p, data, N2);
- ffts_static_rec_i(p, data + N1, N3);
- ffts_static_rec_i(p, data + N1 + N2, N3);
- ffts_static_rec_i(p, data + N, N2);
- ffts_static_rec_i(p, data + N + N1, N2);
+ V4SF r0, r1, r2, r3, r4, r5, r6, r7;
- if(N == p->N) {
- neon_static_x8_t_i(data, N, ws);
- }else{
- neon_static_x8_i(data, N, ws);
- }
+ float *out0 = out + os[0];
+ float *out1 = out + os[1];
- }else if(N==16){
- neon_static_x4_i(data, N, p->ws);
- }
+ V4SF_L_4(inv, in + is[0], in + is[1], in + is[2], in + is[3], &r0, &r1, &r2, &r3);
+ V4SF_L_2(in + is[4], in + is[5], in + is[6], in + is[7], &r4, &r5, &r6, &r7);
+ V4SF_K_0(inv, &r0, &r2, &r4, &r6);
+ V4SF_K_N(inv, V4SF_LD(LUT + 0), V4SF_LD(LUT + 4), &r1, &r3, &r5, &r7);
+ V4SF_TX2(&r0, &r1);
+ V4SF_TX2(&r2, &r3);
+ V4SF_TX2(&r4, &r5);
+ V4SF_TX2(&r6, &r7);
+
+ V4SF_S_4(r0, r2, r4, r6, out0 + 0, out0 + 4, out0 + 8, out0 + 12);
+ V4SF_S_4(r1, r3, r5, r7, out1 + 0, out1 + 4, out1 + 8, out1 + 12);
}
-void ffts_static_rec_f(ffts_plan_t *p, float *data, size_t N) {
- if(N > 16) {
- size_t N1 = N >> 1;
- size_t N2 = N >> 2;
- size_t N3 = N >> 3;
- float *ws = ((float *)(p->ws)) + (p->ws_is[__builtin_ctzl(N)-4] << 1);
- ffts_static_rec_f(p, data, N2);
- ffts_static_rec_f(p, data + N1, N3);
- ffts_static_rec_f(p, data + N1 + N2, N3);
- ffts_static_rec_f(p, data + N, N2);
- ffts_static_rec_f(p, data + N + N1, N2);
+static FFTS_INLINE void
+V4SF_LEAF_EE2(float *const FFTS_RESTRICT out,
+ const ptrdiff_t *FFTS_RESTRICT os,
+ const float *FFTS_RESTRICT in,
+ const ptrdiff_t *FFTS_RESTRICT is,
+ int inv)
+{
+ const float *FFTS_RESTRICT LUT = inv ? ffts_constants_inv_32f : ffts_constants_32f;
+
+ V4SF r0, r1, r2, r3, r4, r5, r6, r7;
- if(N == p->N) {
- neon_static_x8_t_f(data, N, ws);
- }else{
- neon_static_x8_f(data, N, ws);
- }
+ float *out0 = out + os[0];
+ float *out1 = out + os[1];
- }else if(N==16){
- neon_static_x4_f(data, N, p->ws);
- }
+ V4SF_L_4(inv, in + is[6], in + is[7], in + is[4], in + is[5], &r0, &r1, &r2, &r3);
+ V4SF_L_2(in + is[0], in + is[1], in + is[3], in + is[2], &r4, &r5, &r6, &r7);
+ V4SF_K_0(inv, &r0, &r2, &r4, &r6);
+ V4SF_K_N(inv, V4SF_LD(LUT + 0), V4SF_LD(LUT + 4), &r1, &r3, &r5, &r7);
+ V4SF_TX2(&r0, &r1);
+ V4SF_TX2(&r2, &r3);
+ V4SF_TX2(&r4, &r5);
+ V4SF_TX2(&r6, &r7);
+
+ V4SF_S_4(r0, r2, r4, r6, out0 + 0, out0 + 4, out0 + 8, out0 + 12);
+ V4SF_S_4(r1, r3, r5, r7, out1 + 0, out1 + 4, out1 + 8, out1 + 12);
}
-void ffts_static_transform_f(ffts_plan_t *p, const void *in, void *out) {
+static FFTS_INLINE void
+V4SF_LEAF_EO(float *const FFTS_RESTRICT out,
+ const ptrdiff_t *FFTS_RESTRICT os,
+ const float *FFTS_RESTRICT in,
+ const ptrdiff_t *FFTS_RESTRICT is,
+ int inv)
+{
+ const float *FFTS_RESTRICT LUT = inv ? ffts_constants_inv_32f : ffts_constants_32f;
+
+ V4SF r0, r1, r2, r3, r4, r5, r6, r7;
+
+ float *out0 = out + os[0];
+ float *out1 = out + os[1];
+
+ V4SF_L_4_4(inv, in + is[0], in + is[1], in + is[2], in + is[3], &r0, &r1, &r2, &r3);
+ V4SF_L_2_4(inv, in + is[4], in + is[5], in + is[6], in + is[7], &r4, &r5, &r6, &r7);
- if(__builtin_ctzl(p->N) & 1)
- neon_static_o_f(p, in, out);
- else
- neon_static_e_f(p, in, out);
- ffts_static_rec_f(p, out, p->N);
+ V4SF_S_4(r2, r3, r7, r6, out1 + 0, out1 + 4, out1 + 8, out1 + 12);
+ V4SF_K_N(inv, V4SF_LD(LUT + 8), V4SF_LD(LUT + 12), &r0, &r1, &r4, &r5);
+ V4SF_S_4(r0, r1, r4, r5, out0 + 0, out0 + 4, out0 + 8, out0 + 12);
}
+static FFTS_INLINE void
+V4SF_LEAF_OE(float *const FFTS_RESTRICT out,
+ const ptrdiff_t *FFTS_RESTRICT os,
+ const float *FFTS_RESTRICT in,
+ const ptrdiff_t *FFTS_RESTRICT is,
+ int inv)
+{
+ const float *FFTS_RESTRICT LUT = inv ? ffts_constants_inv_32f : ffts_constants_32f;
-void ffts_static_transform_i(ffts_plan_t *p, const void *in, void *out) {
+ V4SF r0, r1, r2, r3, r4, r5, r6, r7;
- if(__builtin_ctzl(p->N) & 1)
- neon_static_o_i(p, in, out);
- else
- neon_static_e_i(p, in, out);
- ffts_static_rec_i(p, out, p->N);
+ float *out0 = out + os[0];
+ float *out1 = out + os[1];
+
+ V4SF_L_4_2(inv, in + is[0], in + is[1], in + is[2], in + is[3], &r0, &r1, &r2, &r3);
+ V4SF_L_4_4(inv, in + is[6], in + is[7], in + is[4], in + is[5], &r4, &r5, &r6, &r7);
+
+ V4SF_S_4(r0, r1, r4, r5, out0 + 0, out0 + 4, out0 + 8, out0 + 12);
+ V4SF_K_N(inv, V4SF_LD(LUT + 8), V4SF_LD(LUT + 12), &r6, &r7, &r2, &r3);
+ V4SF_S_4(r6, r7, r2, r3, out1 + 0, out1 + 4, out1 + 8, out1 + 12);
}
+
+static FFTS_INLINE void
+V4SF_LEAF_OO(float *const FFTS_RESTRICT out,
+ const ptrdiff_t *FFTS_RESTRICT os,
+ const float *FFTS_RESTRICT in,
+ const ptrdiff_t *FFTS_RESTRICT is,
+ int inv)
+{
+ V4SF r0, r1, r2, r3, r4, r5, r6, r7;
+
+ float *out0 = out + os[0];
+ float *out1 = out + os[1];
+
+ V4SF_L_4_4(inv, in + is[0], in + is[1], in + is[2], in + is[3], &r0, &r1, &r2, &r3);
+ V4SF_L_4_4(inv, in + is[6], in + is[7], in + is[4], in + is[5], &r4, &r5, &r6, &r7);
+
+ V4SF_S_4(r0, r1, r4, r5, out0 + 0, out0 + 4, out0 + 8, out0 + 12);
+ V4SF_S_4(r2, r3, r6, r7, out1 + 0, out1 + 4, out1 + 8, out1 + 12);
+}
+
+static FFTS_INLINE void
+V4SF_X_4(int inv,
+ float *FFTS_RESTRICT data,
+ size_t N,
+ const float *FFTS_RESTRICT LUT)
+{
+ size_t i;
+
+ for (i = 0; i < N/8; i++) {
+ V4SF r0 = V4SF_LD(data);
+ V4SF r1 = V4SF_LD(data + 2*N/4);
+ V4SF r2 = V4SF_LD(data + 4*N/4);
+ V4SF r3 = V4SF_LD(data + 6*N/4);
+
+ V4SF_K_N(inv, V4SF_LD(LUT), V4SF_LD(LUT + 4), &r0, &r1, &r2, &r3);
+
+ V4SF_ST(data , r0);
+ V4SF_ST(data + 2*N/4, r1);
+ V4SF_ST(data + 4*N/4, r2);
+ V4SF_ST(data + 6*N/4, r3);
+
+ LUT += 8;
+ data += 4;
+ }
+}
+
+static FFTS_INLINE void
+V4SF_X_8(int inv,
+ float *FFTS_RESTRICT data0,
+ size_t N,
+ const float *FFTS_RESTRICT LUT)
+{
+ float *data1 = data0 + 1*N/4;
+ float *data2 = data0 + 2*N/4;
+ float *data3 = data0 + 3*N/4;
+ float *data4 = data0 + 4*N/4;
+ float *data5 = data0 + 5*N/4;
+ float *data6 = data0 + 6*N/4;
+ float *data7 = data0 + 7*N/4;
+ size_t i;
+
+ for (i = 0; i < N/16; i++) {
+ V4SF r0, r1, r2, r3, r4, r5, r6, r7;
+
+ r0 = V4SF_LD(data0);
+ r1 = V4SF_LD(data1);
+ r2 = V4SF_LD(data2);
+ r3 = V4SF_LD(data3);
+
+ V4SF_K_N(inv, V4SF_LD(LUT), V4SF_LD(LUT + 4), &r0, &r1, &r2, &r3);
+ r4 = V4SF_LD(data4);
+ r6 = V4SF_LD(data6);
+
+ V4SF_K_N(inv, V4SF_LD(LUT + 8), V4SF_LD(LUT + 12), &r0, &r2, &r4, &r6);
+ r5 = V4SF_LD(data5);
+ r7 = V4SF_LD(data7);
+
+ V4SF_K_N(inv, V4SF_LD(LUT + 16), V4SF_LD(LUT + 20), &r1, &r3, &r5, &r7);
+ LUT += 24;
+
+ V4SF_ST(data0, r0);
+ data0 += 4;
+
+ V4SF_ST(data1, r1);
+ data1 += 4;
+
+ V4SF_ST(data2, r2);
+ data2 += 4;
+
+ V4SF_ST(data3, r3);
+ data3 += 4;
+
+ V4SF_ST(data4, r4);
+ data4 += 4;
+
+ V4SF_ST(data5, r5);
+ data5 += 4;
+
+ V4SF_ST(data6, r6);
+ data6 += 4;
+
+ V4SF_ST(data7, r7);
+ data7 += 4;
+ }
+}
+
+static FFTS_INLINE void
+ffts_static_firstpass_odd_32f(float *const FFTS_RESTRICT out,
+ const float *FFTS_RESTRICT in,
+ const ffts_plan_t *FFTS_RESTRICT p,
+ int inv)
+{
+ size_t i, i0 = p->i0, i1 = p->i1;
+ const ptrdiff_t *is = (const ptrdiff_t*) p->is;
+ const ptrdiff_t *os = (const ptrdiff_t*) p->offsets;
+
+ for (i = i0; i > 0; --i) {
+ V4SF_LEAF_EE(out, os, in, is, inv);
+ in += 4;
+ os += 2;
+ }
+
+ for (i = i1; i > 0; --i) {
+ V4SF_LEAF_OO(out, os, in, is, inv);
+ in += 4;
+ os += 2;
+ }
+
+ V4SF_LEAF_OE(out, os, in, is, inv);
+ in += 4;
+ os += 2;
+
+ for (i = i1; i > 0; --i) {
+ V4SF_LEAF_EE2(out, os, in, is, inv);
+ in += 4;
+ os += 2;
+ }
+}
+
+void
+ffts_small_2_32f(ffts_plan_t *p, const void *in, void *out)
+{
+ const float *din = (const float*) in;
+ float *dout = (float*) out;
+ ffts_cpx_32f t0, t1, r0, r1;
+
+ /* unreferenced parameter */
+ (void) p;
+
+ t0[0] = din[0];
+ t0[1] = din[1];
+ t1[0] = din[2];
+ t1[1] = din[3];
+
+ r0[0] = t0[0] + t1[0];
+ r0[1] = t0[1] + t1[1];
+ r1[0] = t0[0] - t1[0];
+ r1[1] = t0[1] - t1[1];
+
+ dout[0] = r0[0];
+ dout[1] = r0[1];
+ dout[2] = r1[0];
+ dout[3] = r1[1];
+}
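
ffts_small_2_32f is the 2-point DFT butterfly, X[0] = x[0] + x[1] and X[1] = x[0] - x[1], identical for the forward and inverse directions. A throwaway check of my own (it assumes direct linkage against this file and exploits the fact that the kernel never dereferences the plan):

#include <stdio.h>
#include "ffts_static.h"  /* declares ffts_small_2_32f; see the header diff below */

int main(void)
{
    float in[4] = { 1.0f, 0.0f, 2.0f, 0.0f }; /* x[0] = 1+0i, x[1] = 2+0i */
    float out[4];

    ffts_small_2_32f(NULL, in, out); /* plan is unreferenced by this kernel */

    printf("X[0] = %g%+gi, X[1] = %g%+gi\n", out[0], out[1], out[2], out[3]);
    return 0; /* expect X[0] = 3+0i, X[1] = -1+0i */
}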
+
+void
+ffts_small_2_64f(ffts_plan_t *p, const void *in, void *out)
+{
+ const double *din = (const double*) in;
+ double *dout = (double*) out;
+ ffts_cpx_64f t0, t1, r0, r1;
+
+ /* unreferenced parameter */
+ (void) p;
+
+ t0[0] = din[0];
+ t0[1] = din[1];
+ t1[0] = din[2];
+ t1[1] = din[3];
+
+ r0[0] = t0[0] + t1[0];
+ r0[1] = t0[1] + t1[1];
+ r1[0] = t0[0] - t1[0];
+ r1[1] = t0[1] - t1[1];
+
+ dout[0] = r0[0];
+ dout[1] = r0[1];
+ dout[2] = r1[0];
+ dout[3] = r1[1];
+}
+
+void
+ffts_small_forward4_32f(ffts_plan_t *p, const void *in, void *out)
+{
+ const float *din = (const float*) in;
+ float *dout = (float*) out;
+ ffts_cpx_32f t0, t1, t2, t3, t4, t5, t6, t7;
+
+ /* unreferenced parameter */
+ (void) p;
+
+ t0[0] = din[0];
+ t0[1] = din[1];
+ t1[0] = din[4];
+ t1[1] = din[5];
+ t2[0] = din[2];
+ t2[1] = din[3];
+ t3[0] = din[6];
+ t3[1] = din[7];
+
+ t4[0] = t0[0] + t1[0];
+ t4[1] = t0[1] + t1[1];
+ t5[0] = t0[0] - t1[0];
+ t5[1] = t0[1] - t1[1];
+ t6[0] = t2[0] + t3[0];
+ t6[1] = t2[1] + t3[1];
+ t7[0] = t2[0] - t3[0];
+ t7[1] = t2[1] - t3[1];
+
+ dout[0] = t4[0] + t6[0];
+ dout[1] = t4[1] + t6[1];
+ dout[4] = t4[0] - t6[0];
+ dout[5] = t4[1] - t6[1];
+ dout[2] = t5[0] + t7[1];
+ dout[3] = t5[1] - t7[0];
+ dout[6] = t5[0] - t7[1];
+ dout[7] = t5[1] + t7[0];
+}
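
The backward kernels below differ from this one only in the sign of the i twiddle applied to the odd difference term t7 in the last four stores. In notation of mine rather than the source's, the forward kernel uses

\[
-i\,(a + b\,i) = b - a\,i,
\]

which is exactly dout[2] = t5[0] + t7[1] and dout[3] = t5[1] - t7[0]; the backward variants use +i(a + bi) = -b + ai, giving the flipped signs seen in ffts_small_backward4_32f.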
+
+void
+ffts_small_forward4_64f(ffts_plan_t *p, const void *in, void *out)
+{
+ const double *din = (const double*) in;
+ double *dout = (double*) out;
+ ffts_cpx_64f t0, t1, t2, t3, t4, t5, t6, t7;
+
+ /* unreferenced parameter */
+ (void) p;
+
+ t0[0] = din[0];
+ t0[1] = din[1];
+ t1[0] = din[4];
+ t1[1] = din[5];
+ t2[0] = din[2];
+ t2[1] = din[3];
+ t3[0] = din[6];
+ t3[1] = din[7];
+
+ t4[0] = t0[0] + t1[0];
+ t4[1] = t0[1] + t1[1];
+ t5[0] = t0[0] - t1[0];
+ t5[1] = t0[1] - t1[1];
+ t6[0] = t2[0] + t3[0];
+ t6[1] = t2[1] + t3[1];
+ t7[0] = t2[0] - t3[0];
+ t7[1] = t2[1] - t3[1];
+
+ dout[0] = t4[0] + t6[0];
+ dout[1] = t4[1] + t6[1];
+ dout[4] = t4[0] - t6[0];
+ dout[5] = t4[1] - t6[1];
+ dout[2] = t5[0] + t7[1];
+ dout[3] = t5[1] - t7[0];
+ dout[6] = t5[0] - t7[1];
+ dout[7] = t5[1] + t7[0];
+}
+
+void
+ffts_small_backward4_32f(ffts_plan_t *p, const void *in, void *out)
+{
+ const float *din = (const float*) in;
+ float *dout = (float*) out;
+ ffts_cpx_32f t0, t1, t2, t3, t4, t5, t6, t7;
+
+ /* unreferenced parameter */
+ (void) p;
+
+ t0[0] = din[0];
+ t0[1] = din[1];
+ t1[0] = din[4];
+ t1[1] = din[5];
+ t2[0] = din[2];
+ t2[1] = din[3];
+ t3[0] = din[6];
+ t3[1] = din[7];
+
+ t4[0] = t0[0] + t1[0];
+ t4[1] = t0[1] + t1[1];
+ t5[0] = t0[0] - t1[0];
+ t5[1] = t0[1] - t1[1];
+ t6[0] = t2[0] + t3[0];
+ t6[1] = t2[1] + t3[1];
+ t7[0] = t2[0] - t3[0];
+ t7[1] = t2[1] - t3[1];
+
+ dout[0] = t4[0] + t6[0];
+ dout[1] = t4[1] + t6[1];
+ dout[4] = t4[0] - t6[0];
+ dout[5] = t4[1] - t6[1];
+ dout[2] = t5[0] - t7[1];
+ dout[3] = t5[1] + t7[0];
+ dout[6] = t5[0] + t7[1];
+ dout[7] = t5[1] - t7[0];
+}
+
+void
+ffts_small_backward4_64f(ffts_plan_t *p, const void *in, void *out)
+{
+ const double *din = (const double*) in;
+ double *dout = (double*) out;
+ ffts_cpx_64f t0, t1, t2, t3, t4, t5, t6, t7;
+
+ /* unreferenced parameter */
+ (void) p;
+
+ t0[0] = din[0];
+ t0[1] = din[1];
+ t1[0] = din[4];
+ t1[1] = din[5];
+ t2[0] = din[2];
+ t2[1] = din[3];
+ t3[0] = din[6];
+ t3[1] = din[7];
+
+ t4[0] = t0[0] + t1[0];
+ t4[1] = t0[1] + t1[1];
+ t5[0] = t0[0] - t1[0];
+ t5[1] = t0[1] - t1[1];
+ t6[0] = t2[0] + t3[0];
+ t6[1] = t2[1] + t3[1];
+ t7[0] = t2[0] - t3[0];
+ t7[1] = t2[1] - t3[1];
+
+ dout[0] = t4[0] + t6[0];
+ dout[1] = t4[1] + t6[1];
+ dout[4] = t4[0] - t6[0];
+ dout[5] = t4[1] - t6[1];
+ dout[2] = t5[0] - t7[1];
+ dout[3] = t5[1] + t7[0];
+ dout[6] = t5[0] + t7[1];
+ dout[7] = t5[1] - t7[0];
+}
+
+void
+ffts_small_forward8_32f(ffts_plan_t *p, const void *in, void *out)
+{
+ const float *FFTS_RESTRICT lut = ffts_constants_small_32f;
+ const float *din = (const float*) in;
+ float *dout = (float*) out;
+ V4SF r0_1, r2_3, r4_5, r6_7;
+
+ /* unreferenced parameter */
+ (void) p;
+
+ V4SF_L_4_2(0, din, din+8, din+4, din+12, &r0_1, &r2_3, &r4_5, &r6_7);
+ V4SF_K_N(0, V4SF_LD(lut), V4SF_LD(lut + 4), &r0_1, &r2_3, &r4_5, &r6_7);
+ V4SF_S_4(r0_1, r2_3, r4_5, r6_7, dout+0, dout+4, dout+8, dout+12);
+}
+
+void
+ffts_small_forward8_64f(ffts_plan_t *p, const void *in, void *out)
+{
+ const double *din = (const double*) in;
+ double *dout = (double*) out;
+// V4SF r0_1, r2_3, r4_5, r6_7;
+// double *LUT8 = (double*) p->ws + p->ws_is[0];
+ (void) p;
+ (void) din;
+ (void) dout;
+
+#if MACROS_READY
+ L_4_2(0, din, din+8, din+4, din+12, &r0_1, &r2_3, &r4_5, &r6_7);
+ K_N(0, VLD(LUT8), VLD(LUT8+4), &r0_1, &r2_3, &r4_5, &r6_7);
+ S_4(r0_1, r2_3, r4_5, r6_7, dout+0, dout+4, dout+8, dout+12);
+#endif
+}
+
+void
+ffts_small_backward8_32f(ffts_plan_t *p, const void *in, void *out)
+{
+ const float *FFTS_RESTRICT lut = ffts_constants_small_inv_32f;
+ const float *din = (const float*) in;
+ float *dout = (float*) out;
+ V4SF r0_1, r2_3, r4_5, r6_7;
+
+ /* unreferenced parameter */
+ (void) p;
+
+ V4SF_L_4_2(1, din, din+8, din+4, din+12, &r0_1, &r2_3, &r4_5, &r6_7);
+ V4SF_K_N(1, V4SF_LD(lut), V4SF_LD(lut+4), &r0_1, &r2_3, &r4_5, &r6_7);
+ V4SF_S_4(r0_1, r2_3, r4_5, r6_7, dout+0, dout+4, dout+8, dout+12);
+}
+
+void
+ffts_small_backward8_64f(ffts_plan_t *p, const void *in, void *out)
+{
+ const double *din = (const double*) in;
+ double *dout = (double*) out;
+// V4SF r0_1, r2_3, r4_5, r6_7;
+// double *LUT8 = (double*) p->ws + p->ws_is[0];
+ (void) p;
+ (void) din;
+ (void) dout;
+
+
+#if MACROS_READY
+ L_4_2(1, din, din+8, din+4, din+12, &r0_1, &r2_3, &r4_5, &r6_7);
+ K_N(1, VLD(LUT8), VLD(LUT8+4), &r0_1, &r2_3, &r4_5, &r6_7);
+ S_4(r0_1, r2_3, r4_5, r6_7, dout+0, dout+4, dout+8, dout+12);
+#endif
+}
+
+void
+ffts_small_forward16_32f(ffts_plan_t *p, const void *in, void *out)
+{
+ const float *FFTS_RESTRICT lut = ffts_constants_small_32f;
+ const float *din = (const float*) in;
+ float *dout = (float*) out;
+ V4SF r0_1, r2_3, r4_5, r6_7, r8_9, r10_11, r12_13, r14_15;
+
+ /* unreferenced parameter */
+ (void) p;
+
+ V4SF_L_4_4(0, din+0, din+16, din+8, din+24, &r0_1, &r2_3, &r8_9, &r10_11);
+ V4SF_L_2_4(0, din+4, din+20, din+28, din+12, &r4_5, &r6_7, &r14_15, &r12_13);
+ V4SF_K_N(0, V4SF_LD(lut), V4SF_LD(lut+4), &r0_1, &r2_3, &r4_5, &r6_7);
+ V4SF_K_N(0, V4SF_LD(lut+8), V4SF_LD(lut+12), &r0_1, &r4_5, &r8_9, &r12_13);
+ V4SF_S_4(r0_1, r4_5, r8_9, r12_13, dout+0, dout+8, dout+16, dout+24);
+ V4SF_K_N(0, V4SF_LD(lut+16), V4SF_LD(lut+20), &r2_3, &r6_7, &r10_11, &r14_15);
+ V4SF_S_4(r2_3, r6_7, r10_11, r14_15, dout+4, dout+12, dout+20, dout+28);
+}
+
+void
+ffts_small_forward16_64f(ffts_plan_t *p, const void *in, void *out)
+{
+ const double *din = (const double*) in;
+ double *dout = (double*) out;
+// double *LUT8 = (double*) p->ws;
+// V4SF r0_1, r2_3, r4_5, r6_7, r8_9, r10_11, r12_13, r14_15;
+ (void) p;
+ (void) din;
+ (void) dout;
+
+#ifdef MACROS_READY
+ L_4_4(0, din+0, din+16, din+8, din+24, &r0_1, &r2_3, &r8_9, &r10_11);
+ L_2_4(0, din+4, din+20, din+28, din+12, &r4_5, &r6_7, &r14_15, &r12_13);
+ K_N(0, VLD(LUT8), VLD(LUT8+4), &r0_1, &r2_3, &r4_5, &r6_7);
+ K_N(0, VLD(LUT8+8), VLD(LUT8+12), &r0_1, &r4_5, &r8_9, &r12_13);
+ S_4(r0_1, r4_5, r8_9, r12_13, dout+0, dout+8, dout+16, dout+24);
+ K_N(0, VLD(LUT8+16), VLD(LUT8+20), &r2_3, &r6_7, &r10_11, &r14_15);
+ S_4(r2_3, r6_7, r10_11, r14_15, dout+4, dout+12, dout+20, dout+28);
+#endif
+}
+
+void
+ffts_small_backward16_32f(ffts_plan_t *p, const void *in, void *out)
+{
+ const float *FFTS_RESTRICT lut = ffts_constants_small_inv_32f;
+ const float *din = (const float*) in;
+ float *dout = (float*) out;
+ V4SF r0_1, r2_3, r4_5, r6_7, r8_9, r10_11, r12_13, r14_15;
+
+ /* unreferenced parameter */
+ (void) p;
+
+ V4SF_L_4_4(1, din+0, din+16, din+8, din+24, &r0_1, &r2_3, &r8_9, &r10_11);
+ V4SF_L_2_4(1, din+4, din+20, din+28, din+12, &r4_5, &r6_7, &r14_15, &r12_13);
+ V4SF_K_N(1, V4SF_LD(lut), V4SF_LD(lut+4), &r0_1, &r2_3, &r4_5, &r6_7);
+ V4SF_K_N(1, V4SF_LD(lut+8), V4SF_LD(lut+12), &r0_1, &r4_5, &r8_9, &r12_13);
+ V4SF_S_4(r0_1, r4_5, r8_9, r12_13, dout+0, dout+8, dout+16, dout+24);
+ V4SF_K_N(1, V4SF_LD(lut+16), V4SF_LD(lut+20), &r2_3, &r6_7, &r10_11, &r14_15);
+ V4SF_S_4(r2_3, r6_7, r10_11, r14_15, dout+4, dout+12, dout+20, dout+28);
+}
+
+void
+ffts_small_backward16_64f(ffts_plan_t *p, const void *in, void *out)
+{
+ const double *din = (const double*) in;
+ double *dout = (double*) out;
+// double *LUT8 = (double*) p->ws;
+// V4SF r0_1, r2_3, r4_5, r6_7, r8_9, r10_11, r12_13, r14_15;
+ (void) p;
+ (void) din;
+ (void) dout;
+
+#ifdef MACROS_READY
+ L_4_4(1, din+0, din+16, din+8, din+24, &r0_1, &r2_3, &r8_9, &r10_11);
+ L_2_4(1, din+4, din+20, din+28, din+12, &r4_5, &r6_7, &r14_15, &r12_13);
+ K_N(1, VLD(LUT8), VLD(LUT8+4), &r0_1, &r2_3, &r4_5, &r6_7);
+ K_N(1, VLD(LUT8+8), VLD(LUT8+12), &r0_1, &r4_5, &r8_9, &r12_13);

+ S_4(r0_1, r4_5, r8_9, r12_13, dout+0, dout+8, dout+16, dout+24);
+ K_N(1, VLD(LUT8+16), VLD(LUT8+20), &r2_3, &r6_7, &r10_11, &r14_15);
+ S_4(r2_3, r6_7, r10_11, r14_15, dout+4, dout+12, dout+20, dout+28);
+#endif
+}
+
+static FFTS_INLINE void
+ffts_static_firstpass_even_32f(float *FFTS_RESTRICT out,
+ const float *FFTS_RESTRICT in,
+ const ffts_plan_t *FFTS_RESTRICT p,
+ int inv)
+{
+ size_t i, i0 = p->i0, i1 = p->i1;
+ const ptrdiff_t *is = (const ptrdiff_t*) p->is;
+ const ptrdiff_t *os = (const ptrdiff_t*) p->offsets;
+
+ for(i = i0; i > 0; --i) {
+ V4SF_LEAF_EE(out, os, in, is, inv);
+ in += 4;
+ os += 2;
+ }
+
+ V4SF_LEAF_EO(out, os, in, is, inv);
+ in += 4;
+ os += 2;
+
+ for (i = i1; i > 0; --i) {
+ V4SF_LEAF_OO(out, os, in, is, inv);
+ in += 4;
+ os += 2;
+ }
+
+ for (i = i1; i > 0; --i) {
+ V4SF_LEAF_EE2(out, os, in, is, inv);
+ in += 4;
+ os += 2;
+ }
+}
+
+static void
+ffts_static_rec_f_32f(const ffts_plan_t *p, float *data, size_t N)
+{
+ const float *ws = (const float*) p->ws;
+
+#if defined(HAVE_NEON) && defined(DYNAMIC_DISABLED)
+ if (N > 128) {
+ const size_t N1 = N >> 1;
+ const size_t N2 = N >> 2;
+ const size_t N3 = N >> 3;
+
+ ffts_static_rec_f_32f(p, data , N2);
+ ffts_static_rec_f_32f(p, data + N1 , N3);
+ ffts_static_rec_f_32f(p, data + N1 + N2, N3);
+ ffts_static_rec_f_32f(p, data + N , N2);
+ ffts_static_rec_f_32f(p, data + N + N1 , N2);
+
+ neon_static_x8_f(data, N, ws + (p->ws_is[ffts_ctzl(N) - 4] << 1));
+ } else if (N == 128) {
+ const float *ws1 = ws + (p->ws_is[1] << 1);
+
+ neon_static_x8_f(data, 32, ws1);
+
+ neon_static_x4_f(data + 64, ws);
+ neon_static_x4_f(data + 96, ws);
+
+ neon_static_x8_f(data + 128, 32, ws1);
+ neon_static_x8_f(data + 192, 32, ws1);
+
+ neon_static_x8_f(data, 128, ws + (p->ws_is[3] << 1));
+ } else if (N == 64) {
+ neon_static_x4_f(data , ws);
+ neon_static_x4_f(data + 64, ws);
+ neon_static_x4_f(data + 96, ws);
+
+ neon_static_x8_f(data, 64, ws + (p->ws_is[2] << 1));
+ } else {
+ assert(N == 32);
+ neon_static_x8_f(data, 32, ws + (p->ws_is[1] << 1));
+ }
+#else
+ if (N > 128) {
+ const size_t N1 = N >> 1;
+ const size_t N2 = N >> 2;
+ const size_t N3 = N >> 3;
+
+ ffts_static_rec_f_32f(p, data , N2);
+ ffts_static_rec_f_32f(p, data + N1 , N3);
+ ffts_static_rec_f_32f(p, data + N1 + N2, N3);
+ ffts_static_rec_f_32f(p, data + N , N2);
+ ffts_static_rec_f_32f(p, data + N + N1 , N2);
+
+ V4SF_X_8(0, data, N, ws + (p->ws_is[ffts_ctzl(N) - 4] << 1));
+ } else if (N == 128) {
+ const float *ws1 = ws + (p->ws_is[1] << 1);
+
+ V4SF_X_8(0, data + 0, 32, ws1);
+ V4SF_X_4(0, data + 64, 16, ws);
+ V4SF_X_4(0, data + 96, 16, ws);
+ V4SF_X_8(0, data + 128, 32, ws1);
+ V4SF_X_8(0, data + 192, 32, ws1);
+
+ V4SF_X_8(0, data, 128, ws + (p->ws_is[3] << 1));
+ } else if (N == 64) {
+ V4SF_X_4(0, data + 0, 16, ws);
+ V4SF_X_4(0, data + 64, 16, ws);
+ V4SF_X_4(0, data + 96, 16, ws);
+
+ V4SF_X_8(0, data, 64, ws + (p->ws_is[2] << 1));
+ } else {
+ assert(N == 32);
+ V4SF_X_8(0, data, 32, ws + (p->ws_is[1] << 1));
+ }
+#endif
+}
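
In both branches the recursion is the same conjugate-pair style split: five subtransforms of sizes N/4, N/8, N/8, N/4, N/4 (N in complex points; the pointer offsets above are in floats, i.e. twice the complex offsets), finished by one size-N cross pass, while anything of 16 points or less was already completed by the first pass. A throwaway sketch (mine, not part of the patch) that prints the tree this produces:

#include <stdio.h>

/* Mirrors the N > 128 branch of ffts_static_rec_f_32f; offsets here are
   in complex points, so half of the float offsets used above. */
static void print_split(int depth, long offset, long N)
{
    printf("%*s%ld-point subtransform at complex offset %ld\n",
           2 * depth, "", N, offset);
    if (N > 128) {
        print_split(depth + 1, offset,         N / 4);
        print_split(depth + 1, offset + N/4,   N / 8);
        print_split(depth + 1, offset + 3*N/8, N / 8);
        print_split(depth + 1, offset + N/2,   N / 4);
        print_split(depth + 1, offset + 3*N/4, N / 4);
    }
}

int main(void)
{
    print_split(0, 0, 1024);
    return 0;
}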
+
+static void
+ffts_static_rec_i_32f(const ffts_plan_t *p, float *data, size_t N)
+{
+ const float *ws = (const float*) p->ws;
+
+#if defined(HAVE_NEON) && defined(DYNAMIC_DISABLED)
+ if (N > 128) {
+ const size_t N1 = N >> 1;
+ const size_t N2 = N >> 2;
+ const size_t N3 = N >> 3;
+
+ ffts_static_rec_i_32f(p, data , N2);
+ ffts_static_rec_i_32f(p, data + N1 , N3);
+ ffts_static_rec_i_32f(p, data + N1 + N2, N3);
+ ffts_static_rec_i_32f(p, data + N , N2);
+ ffts_static_rec_i_32f(p, data + N + N1 , N2);
+
+ neon_static_x8_i(data, N, ws + (p->ws_is[ffts_ctzl(N) - 4] << 1));
+ } else if (N == 128) {
+ const float *ws1 = ws + (p->ws_is[1] << 1);
+
+ neon_static_x8_i(data, 32, ws1);
+
+ neon_static_x4_i(data + 64, ws);
+ neon_static_x4_i(data + 96, ws);
+
+ neon_static_x8_i(data + 128, 32, ws1);
+ neon_static_x8_i(data + 192, 32, ws1);
+
+ neon_static_x8_i(data, 128, ws + (p->ws_is[3] << 1));
+ } else if (N == 64) {
+ neon_static_x4_i(data , ws);
+ neon_static_x4_i(data + 64, ws);
+ neon_static_x4_i(data + 96, ws);
+
+ neon_static_x8_i(data, 64, ws + (p->ws_is[2] << 1));
+ } else {
+ assert(N == 32);
+ neon_static_x8_i(data, 32, ws + (p->ws_is[1] << 1));
+ }
+#else
+ if (N > 128) {
+ const size_t N1 = N >> 1;
+ const size_t N2 = N >> 2;
+ const size_t N3 = N >> 3;
+
+ ffts_static_rec_i_32f(p, data , N2);
+ ffts_static_rec_i_32f(p, data + N1 , N3);
+ ffts_static_rec_i_32f(p, data + N1 + N2, N3);
+ ffts_static_rec_i_32f(p, data + N , N2);
+ ffts_static_rec_i_32f(p, data + N + N1 , N2);
+
+ V4SF_X_8(1, data, N, ws + (p->ws_is[ffts_ctzl(N) - 4] << 1));
+ } else if (N == 128) {
+ const float *ws1 = ws + (p->ws_is[1] << 1);
+
+ V4SF_X_8(1, data + 0, 32, ws1);
+ V4SF_X_4(1, data + 64, 16, ws);
+ V4SF_X_4(1, data + 96, 16, ws);
+ V4SF_X_8(1, data + 128, 32, ws1);
+ V4SF_X_8(1, data + 192, 32, ws1);
+
+ V4SF_X_8(1, data, 128, ws + (p->ws_is[3] << 1));
+ } else if (N == 64) {
+ V4SF_X_4(1, data + 0, 16, ws);
+ V4SF_X_4(1, data + 64, 16, ws);
+ V4SF_X_4(1, data + 96, 16, ws);
+
+ V4SF_X_8(1, data, 64, ws + (p->ws_is[2] << 1));
+ } else {
+ assert(N == 32);
+ V4SF_X_8(1, data, 32, ws + (p->ws_is[1] << 1));
+ }
+#endif
+}
+
+void
+ffts_static_transform_f_32f(ffts_plan_t *p, const void *in, void *out)
+{
+ const float *din = (const float*) in;
+ float *dout = (float*) out;
+
+ const size_t N = p->N;
+ const int N_log_2 = ffts_ctzl(N);
+
+#if defined(HAVE_NEON) && defined(DYNAMIC_DISABLED)
+ const float *ws = (const float*) p->ws;
+
+ if (N_log_2 & 1) {
+ neon_static_o_f(p, din, dout);
+ } else {
+ neon_static_e_f(p, din, dout);
+ }
+
+ if (N > 128) {
+ const size_t N1 = N >> 1;
+ const size_t N2 = N >> 2;
+ const size_t N3 = N >> 3;
+
+ ffts_static_rec_f_32f(p, dout , N2);
+ ffts_static_rec_f_32f(p, dout + N1 , N3);
+ ffts_static_rec_f_32f(p, dout + N1 + N2, N3);
+ ffts_static_rec_f_32f(p, dout + N , N2);
+ ffts_static_rec_f_32f(p, dout + N + N1 , N2);
+
+ neon_static_x8_t_f(dout, N, ws + (p->ws_is[N_log_2 - 4] << 1));
+ } else if (N == 128) {
+ neon_static_x8_f(dout, 32, ws + 8);
+
+ neon_static_x4_f(dout + 64, ws);
+ neon_static_x4_f(dout + 96, ws);
+
+ neon_static_x8_f(dout + 128, 32, ws + 8);
+ neon_static_x8_f(dout + 192, 32, ws + 8);
+
+ neon_static_x8_t_f(dout, 128, ws + 80);
+ } else if (N == 64) {
+ neon_static_x4_f(dout , ws);
+ neon_static_x4_f(dout + 64, ws);
+ neon_static_x4_f(dout + 96, ws);
+
+ neon_static_x8_t_f(dout, 64, ws + 32);
+ } else {
+ assert(N == 32);
+ neon_static_x8_t_f(dout, 32, ws + 8);
+ }
+#else
+ if (N_log_2 & 1) {
+ ffts_static_firstpass_odd_32f(dout, din, p, 0);
+ } else {
+ ffts_static_firstpass_even_32f(dout, din, p, 0);
+ }
+
+ ffts_static_rec_f_32f(p, dout, N);
+#endif
+}
+
+void
+ffts_static_transform_i_32f(ffts_plan_t *p, const void *in, void *out)
+{
+ const float *din = (const float*) in;
+ float *dout = (float*) out;
+
+ const size_t N = p->N;
+ const int N_log_2 = ffts_ctzl(N);
+
+#if defined(HAVE_NEON) && defined(DYNAMIC_DISABLED)
+ const float *ws = (const float*) p->ws;
+
+ if (N_log_2 & 1) {
+ neon_static_o_i(p, din, dout);
+ } else {
+ neon_static_e_i(p, din, dout);
+ }
+
+ if (N > 128) {
+ const size_t N1 = N >> 1;
+ const size_t N2 = N >> 2;
+ const size_t N3 = N >> 3;
+
+ ffts_static_rec_i_32f(p, dout , N2);
+ ffts_static_rec_i_32f(p, dout + N1 , N3);
+ ffts_static_rec_i_32f(p, dout + N1 + N2, N3);
+ ffts_static_rec_i_32f(p, dout + N , N2);
+ ffts_static_rec_i_32f(p, dout + N + N1 , N2);
+
+ neon_static_x8_t_i(dout, N, ws + (p->ws_is[N_log_2 - 4] << 1));
+ } else if (N == 128) {
+ neon_static_x8_i(dout, 32, ws + 8);
+
+ neon_static_x4_i(dout + 64, ws);
+ neon_static_x4_i(dout + 96, ws);
+
+ neon_static_x8_i(dout + 128, 32, ws + 8);
+ neon_static_x8_i(dout + 192, 32, ws + 8);
+
+ neon_static_x8_t_i(dout, 128, ws + 80);
+ } else if (N == 64) {
+ neon_static_x4_i(dout , ws);
+ neon_static_x4_i(dout + 64, ws);
+ neon_static_x4_i(dout + 96, ws);
+
+ neon_static_x8_t_i(dout, 64, ws + 32);
+ } else {
+ assert(N == 32);
+ neon_static_x8_t_i(dout, 32, ws + 8);
+ }
+#else
+ if (N_log_2 & 1) {
+ ffts_static_firstpass_odd_32f(dout, din, p, 1);
+ } else {
+ ffts_static_firstpass_even_32f(dout, din, p, 1);
+ }
+
+ ffts_static_rec_i_32f(p, dout, N);
+#endif
+}
\ No newline at end of file
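
These static kernels are normally reached through the public API in include/ffts.h rather than called directly. A minimal usage sketch (mine; it assumes the ffts_init_1d/ffts_execute/ffts_free entry points and the FFTS_FORWARD constant declared there, plus C11 aligned_alloc for the SIMD-aligned buffers FFTS expects). A unit impulse should transform to an all-ones spectrum:

#include <stdio.h>
#include <stdlib.h>
#include "ffts.h"

int main(void)
{
    size_t N = 32, i;
    float *in  = aligned_alloc(32, 2 * N * sizeof(float));
    float *out = aligned_alloc(32, 2 * N * sizeof(float));
    ffts_plan_t *p = ffts_init_1d(N, FFTS_FORWARD);

    if (!in || !out || !p)
        return 1;

    for (i = 0; i < N; i++) { /* interleaved complex: impulse at n = 0 */
        in[2*i + 0] = (i == 0) ? 1.0f : 0.0f;
        in[2*i + 1] = 0.0f;
    }

    ffts_execute(p, in, out);

    printf("X[0] = %g%+gi, X[%zu] = %g%+gi\n",
           out[0], out[1], N - 1, out[2*(N-1)], out[2*(N-1) + 1]);

    ffts_free(p);
    free(in);
    free(out);
    return 0; /* every bin should print as 1+0i */
}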
diff --git a/lib/ffts/src/ffts_static.h b/lib/ffts/src/ffts_static.h
index 4490bde..5a42fc2 100644
--- a/lib/ffts/src/ffts_static.h
+++ b/lib/ffts/src/ffts_static.h
@@ -1,46 +1,91 @@
/*
-
- This file is part of FFTS -- The Fastest Fourier Transform in the South
-
- Copyright (c) 2012, Anthony M. Blake <amb@anthonix.com>
- Copyright (c) 2012, The University of Waikato
-
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions are met:
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in the
- documentation and/or other materials provided with the distribution.
- * Neither the name of the organization nor the
- names of its contributors may be used to endorse or promote products
- derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
- ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- DISCLAIMED. IN NO EVENT SHALL ANTHONY M. BLAKE BE LIABLE FOR ANY
- DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+This file is part of FFTS -- The Fastest Fourier Transform in the South
+
+Copyright (c) 2012, Anthony M. Blake <amb@anthonix.com>
+Copyright (c) 2012, The University of Waikato
+
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+* Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+* Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in the
+documentation and/or other materials provided with the distribution.
+* Neither the name of the organization nor the
+names of its contributors may be used to endorse or promote products
+derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL ANTHONY M. BLAKE BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef __FFTS_STATIC_H__
-#define __FFTS_STATIC_H__
+#ifndef FFTS_STATIC_H
+#define FFTS_STATIC_H
+
+#if defined (_MSC_VER) && (_MSC_VER >= 1020)
+#pragma once
+#endif
#include "ffts.h"
-#include "neon.h"
-void ffts_static_rec_f(ffts_plan_t *p, float *data, size_t N) ;
-void ffts_static_transform_f(ffts_plan_t *p, const void *in, void *out);
+void
+ffts_small_2_32f(ffts_plan_t *p, const void *in, void *out);
-void ffts_static_rec_i(ffts_plan_t *p, float *data, size_t N) ;
-void ffts_static_transform_i(ffts_plan_t *p, const void *in, void *out);
+void
+ffts_small_2_64f(ffts_plan_t *p, const void *in, void *out);
-#endif
+void
+ffts_small_forward4_32f(ffts_plan_t *p, const void *in, void *out);
+
+void
+ffts_small_forward4_64f(ffts_plan_t *p, const void *in, void *out);
+
+void
+ffts_small_backward4_32f(ffts_plan_t *p, const void *in, void *out);
+
+void
+ffts_small_backward4_64f(ffts_plan_t *p, const void *in, void *out);
+
+void
+ffts_small_forward8_32f(ffts_plan_t *p, const void *in, void *out);
+
+void
+ffts_small_forward8_64f(ffts_plan_t *p, const void *in, void *out);
+
+void
+ffts_small_backward8_32f(ffts_plan_t *p, const void *in, void *out);
+
+void
+ffts_small_backward8_64f(ffts_plan_t *p, const void *in, void *out);
+
+void
+ffts_small_forward16_32f(ffts_plan_t *p, const void *in, void *out);
+
+void
+ffts_small_forward16_64f(ffts_plan_t *p, const void *in, void *out);
+
+void
+ffts_small_backward16_32f(ffts_plan_t *p, const void *in, void *out);
+
+void
+ffts_small_backward16_64f(ffts_plan_t *p, const void *in, void *out);
+
+void
+ffts_static_transform_f_32f(ffts_plan_t *p, const void *in, void *out);
+
+void
+ffts_static_transform_i_32f(ffts_plan_t *p, const void *in, void *out);
+
+#endif /* FFTS_STATIC_H */
diff --git a/lib/ffts/src/ffts_transpose.c b/lib/ffts/src/ffts_transpose.c
new file mode 100644
index 0000000..272cb48
--- /dev/null
+++ b/lib/ffts/src/ffts_transpose.c
@@ -0,0 +1,194 @@
+/*
+
+This file is part of FFTS -- The Fastest Fourier Transform in the South
+
+Copyright (c) 2016, Jukka Ojanen <jukka.ojanen@kolumbus.fi>
+Copyright (c) 2012, Anthony M. Blake <amb@anthonix.com>
+Copyright (c) 2012, The University of Waikato
+
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+* Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+* Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in the
+documentation and/or other materials provided with the distribution.
+* Neither the name of the organization nor the
+names of its contributors may be used to endorse or promote products
+derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL ANTHONY M. BLAKE BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*/
+
+#include "ffts_transpose.h"
+#include "ffts_internal.h"
+
+#ifdef HAVE_NEON
+#include "neon.h"
+#include <arm_neon.h>
+#elif HAVE_SSE2
+#include <emmintrin.h>
+#endif
+
+#define TSIZE 8
+
+void
+ffts_transpose(uint64_t *in, uint64_t *out, int w, int h)
+{
+#ifdef HAVE_NEON
+#if 0
+ neon_transpose4(in, out, w, h);
+#else
+ neon_transpose8(in, out, w, h);
+#endif
+#elif HAVE_SSE2
+ uint64_t FFTS_ALIGN(64) tmp[TSIZE*TSIZE];
+ int tx, ty;
+ /* int x; */
+ int y;
+ int tw = w / TSIZE;
+ int th = h / TSIZE;
+
+ for (ty = 0; ty < th; ty++) {
+ for (tx = 0; tx < tw; tx++) {
+ uint64_t *ip0 = in + w*TSIZE*ty + tx * TSIZE;
+ uint64_t *op0 = tmp; /* out + h*TSIZE*tx + ty*TSIZE; */
+
+ /* copy/transpose to tmp */
+ for (y = 0; y < TSIZE; y += 2) {
+ /* for (x=0;x<TSIZE;x+=2) {
+ op[x*TSIZE] = ip[x];
+ */
+ __m128d q0 = _mm_load_pd((double*)(ip0 + 0*w));
+ __m128d q1 = _mm_load_pd((double*)(ip0 + 1*w));
+ __m128d q2 = _mm_load_pd((double*)(ip0 + 2*w));
+ __m128d q3 = _mm_load_pd((double*)(ip0 + 3*w));
+ __m128d q4 = _mm_load_pd((double*)(ip0 + 4*w));
+ __m128d q5 = _mm_load_pd((double*)(ip0 + 5*w));
+ __m128d q6 = _mm_load_pd((double*)(ip0 + 6*w));
+ __m128d q7 = _mm_load_pd((double*)(ip0 + 7*w));
+
+ __m128d t0 = _mm_shuffle_pd(q0, q1, _MM_SHUFFLE2(0, 0));
+ __m128d t1 = _mm_shuffle_pd(q0, q1, _MM_SHUFFLE2(1, 1));
+ __m128d t2 = _mm_shuffle_pd(q2, q3, _MM_SHUFFLE2(0, 0));
+ __m128d t3 = _mm_shuffle_pd(q2, q3, _MM_SHUFFLE2(1, 1));
+ __m128d t4 = _mm_shuffle_pd(q4, q5, _MM_SHUFFLE2(0, 0));
+ __m128d t5 = _mm_shuffle_pd(q4, q5, _MM_SHUFFLE2(1, 1));
+ __m128d t6 = _mm_shuffle_pd(q6, q7, _MM_SHUFFLE2(0, 0));
+ __m128d t7 = _mm_shuffle_pd(q6, q7, _MM_SHUFFLE2(1, 1));
+
+ ip0 += 2;
+ /* _mm_store_pd((double *)(op0 + y*h + x), t0);
+ _mm_store_pd((double *)(op0 + y*h + x + h), t1);
+ */
+
+ _mm_store_pd((double*)(op0 + 0 ), t0);
+ _mm_store_pd((double*)(op0 + 0 + TSIZE), t1);
+ _mm_store_pd((double*)(op0 + 2 ), t2);
+ _mm_store_pd((double*)(op0 + 2 + TSIZE), t3);
+ _mm_store_pd((double*)(op0 + 4 ), t4);
+ _mm_store_pd((double*)(op0 + 4 + TSIZE), t5);
+ _mm_store_pd((double*)(op0 + 6 ), t6);
+ _mm_store_pd((double*)(op0 + 6 + TSIZE), t7);
+ /* } */
+
+ op0 += 2*TSIZE;
+ }
+
+ op0 = out + h*tx*TSIZE + ty*TSIZE;
+ ip0 = tmp;
+ for (y = 0; y < TSIZE; y += 1) {
+ /* memcpy(op0, ip0, TSIZE * sizeof(*ip0)); */
+
+ __m128d q0 = _mm_load_pd((double*)(ip0 + 0));
+ __m128d q1 = _mm_load_pd((double*)(ip0 + 2));
+ __m128d q2 = _mm_load_pd((double*)(ip0 + 4));
+ __m128d q3 = _mm_load_pd((double*)(ip0 + 6));
+
+ _mm_store_pd((double*)(op0 + 0), q0);
+ _mm_store_pd((double*)(op0 + 2), q1);
+ _mm_store_pd((double*)(op0 + 4), q2);
+ _mm_store_pd((double*)(op0 + 6), q3);
+
+ op0 += h;
+ ip0 += TSIZE;
+ }
+ }
+ }
+ /*
+ size_t i,j;
+ for(i=0;i<w;i+=2) {
+ for(j=0;j<h;j+=2) {
+ // out[i*h + j] = in[j*w + i];
+ __m128d q0 = _mm_load_pd((double *)(in + j*w + i));
+ __m128d q1 = _mm_load_pd((double *)(in + j*w + i + w));
+ __m128d t0 = _mm_shuffle_pd(q0, q1, _MM_SHUFFLE2(0, 0));
+ __m128d t1 = _mm_shuffle_pd(q0, q1, _MM_SHUFFLE2(1, 1));
+ _mm_store_pd((double *)(out + i*h + j), t0);
+ _mm_store_pd((double *)(out + i*h + j + h), t1);
+ }
+ }
+ */
+#else
+ const int bw = 1;
+ const int bh = 8;
+ int i = 0, j = 0;
+
+ for (; i <= h - bh; i += bh) {
+ for (j = 0; j <= w - bw; j += bw) {
+ uint64_t const *ib = &in[w*i + j];
+ uint64_t *ob = &out[h*j + i];
+
+ uint64_t s_0_0 = ib[0*w + 0];
+ uint64_t s_1_0 = ib[1*w + 0];
+ uint64_t s_2_0 = ib[2*w + 0];
+ uint64_t s_3_0 = ib[3*w + 0];
+ uint64_t s_4_0 = ib[4*w + 0];
+ uint64_t s_5_0 = ib[5*w + 0];
+ uint64_t s_6_0 = ib[6*w + 0];
+ uint64_t s_7_0 = ib[7*w + 0];
+
+ ob[0*h + 0] = s_0_0;
+ ob[0*h + 1] = s_1_0;
+ ob[0*h + 2] = s_2_0;
+ ob[0*h + 3] = s_3_0;
+ ob[0*h + 4] = s_4_0;
+ ob[0*h + 5] = s_5_0;
+ ob[0*h + 6] = s_6_0;
+ ob[0*h + 7] = s_7_0;
+ }
+ }
+
+ if (i < h) {
+ int i1;
+
+ for (i1 = 0; i1 < w; i1++) {
+ for (j = i; j < h; j++) {
+ out[i1*h + j] = in[j*w + i1];
+ }
+ }
+ }
+
+ if (j < w) {
+ int j1;
+
+ for (i = j; i < w; i++) {
+ for (j1 = 0; j1 < h; j1++) {
+ out[i*h + j1] = in[j1*w + i];
+ }
+ }
+ }
+#endif
+}
\ No newline at end of file
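
A quick correctness harness for the three code paths above (a sketch of mine, assuming C11 aligned_alloc; w and h are kept multiples of TSIZE so the SSE2 blocked path is fully exercised):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include "ffts_transpose.h"

int main(void)
{
    int w = 16, h = 16, x, y, ok = 1;
    /* the SSE2 path does 16-byte aligned loads/stores, so over-align */
    uint64_t *in  = aligned_alloc(64, (size_t) w * h * sizeof(uint64_t));
    uint64_t *out = aligned_alloc(64, (size_t) w * h * sizeof(uint64_t));

    if (!in || !out)
        return 1;

    for (y = 0; y < h; y++)
        for (x = 0; x < w; x++)
            in[y * w + x] = (uint64_t) (y * w + x);

    ffts_transpose(in, out, w, h);

    for (y = 0; y < h && ok; y++)  /* expect out[x*h + y] == in[y*w + x] */
        for (x = 0; x < w; x++)
            if (out[x * h + y] != in[y * w + x]) { ok = 0; break; }

    puts(ok ? "transpose OK" : "transpose MISMATCH");
    free(in);
    free(out);
    return !ok;
}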
diff --git a/lib/ffts/src/ffts_transpose.h b/lib/ffts/src/ffts_transpose.h
new file mode 100644
index 0000000..fe376cb
--- /dev/null
+++ b/lib/ffts/src/ffts_transpose.h
@@ -0,0 +1,46 @@
+/*
+
+This file is part of FFTS -- The Fastest Fourier Transform in the South
+
+Copyright (c) 2012, Anthony M. Blake <amb@anthonix.com>
+Copyright (c) 2012, The University of Waikato
+
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+* Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+* Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in the
+documentation and/or other materials provided with the distribution.
+* Neither the name of the organization nor the
+names of its contributors may be used to endorse or promote products
+derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL ANTHONY M. BLAKE BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*/
+
+#ifndef FFTS_TRANSPOSE_H
+#define FFTS_TRANSPOSE_H
+
+#if defined (_MSC_VER) && (_MSC_VER >= 1020)
+#pragma once
+#endif
+
+#include "ffts_internal.h"
+
+void
+ffts_transpose(uint64_t *in, uint64_t *out, int w, int h);
+
+#endif /* FFTS_TRANSPOSE_H */
diff --git a/lib/ffts/src/ffts_trig.c b/lib/ffts/src/ffts_trig.c
new file mode 100644
index 0000000..74ebfd2
--- /dev/null
+++ b/lib/ffts/src/ffts_trig.c
@@ -0,0 +1,628 @@
+/*
+
+This file is part of FFTS -- The Fastest Fourier Transform in the South
+
+Copyright (c) 2015, Jukka Ojanen <jukka.ojanen@kolumbus.fi>
+
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+* Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+* Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in the
+documentation and/or other materials provided with the distribution.
+* Neither the name of the organization nor the
+names of its contributors may be used to endorse or promote products
+derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL ANTHONY M. BLAKE BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*/
+
+#include "ffts_trig.h"
+#include "ffts_dd.h"
+
+/* 1/(2*cos(pow(2,-p)*pi)) */
+static const FFTS_ALIGN(16) unsigned int half_secant[132] = {
+ 0x00000000, 0x3fe00000, 0xc9be45de, 0x3be3bd3c,
+ 0x00000000, 0x3fe00000, 0xc9be45de, 0x3c03bd3c,
+ 0x00000000, 0x3fe00000, 0xc9be45de, 0x3c23bd3c,
+ 0x00000000, 0x3fe00000, 0xc9be45de, 0x3c43bd3c,
+ 0x00000000, 0x3fe00000, 0xc9be45de, 0x3c63bd3c,
+ 0x00000000, 0x3fe00000, 0xc9be45df, 0x3c83bd3c,
+ 0x00000001, 0x3fe00000, 0x4df22efd, 0x3c7de9e6,
+ 0x00000005, 0x3fe00000, 0x906e8725, 0xbc60b0cd,
+ 0x00000014, 0x3fe00000, 0x906e8357, 0xbc80b0cd,
+ 0x0000004f, 0x3fe00000, 0x0dce83c9, 0xbc5619b2,
+ 0x0000013c, 0x3fe00000, 0x0dc6e79a, 0xbc7619b2,
+ 0x000004ef, 0x3fe00000, 0xe4af1240, 0x3c83cc9b,
+ 0x000013bd, 0x3fe00000, 0x2d14c08a, 0x3c7e64df,
+ 0x00004ef5, 0x3fe00000, 0x47a85465, 0xbc59b20b,
+ 0x00013bd4, 0x3fe00000, 0xab79c897, 0xbc79b203,
+ 0x0004ef4f, 0x3fe00000, 0x15019a96, 0x3c79386b,
+ 0x0013bd3d, 0x3fe00000, 0x7d6dbf4b, 0xbc7b16b7,
+ 0x004ef4f3, 0x3fe00000, 0xf30832e0, 0x3c741ee4,
+ 0x013bd3cd, 0x3fe00000, 0xd3bcd4bb, 0xbc83f41e,
+ 0x04ef4f34, 0x3fe00000, 0xdd75aebb, 0xbc82ef06,
+ 0x13bd3cde, 0x3fe00000, 0xb2b41b3d, 0x3c52d979,
+ 0x4ef4f46c, 0x3fe00000, 0x4f0fb458, 0xbc851db3,
+ 0x3bd3e0e7, 0x3fe00001, 0x8a0ce3f0, 0x3c58dbab,
+ 0xef507722, 0x3fe00004, 0x2a8ec295, 0x3c83e351,
+ 0xbd5114f9, 0x3fe00013, 0xc4c0d92d, 0x3c8b3ca4,
+ 0xf637de7d, 0x3fe0004e, 0xb74de729, 0x3c45974e,
+ 0xe8190891, 0x3fe0013b, 0x26edf4da, 0xbc814c20,
+ 0x9436640e, 0x3fe004f0, 0xe2b34b50, 0x3c8091ab,
+ 0x9c61d971, 0x3fe013d1, 0x6ce01b8e, 0x3c7f7df7,
+ 0xd17cba53, 0x3fe0503e, 0x74ad7633, 0xbc697609,
+ 0x7bdb3895, 0x3fe1517a, 0x82f9091b, 0xbc8008d1,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000
+};
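
Each row of half_secant packs one double-double value as four 32-bit words: the low and high words of the high double, then of the low (correction) double, in little-endian word order (my reading of how the generators below reinterpret the array). A decoding sketch for the first row:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
    /* first row above: hi = 0.5 exactly, lo = a tiny (~2^-65) positive
       correction; together they approximate 1/(2*cos(2^-p*pi)) for the
       largest p (smallest angle) in the table */
    const uint32_t row[4] = { 0x00000000, 0x3fe00000,
                              0xc9be45de, 0x3be3bd3c };
    uint64_t u;
    double hi, lo;

    u = ((uint64_t) row[1] << 32) | row[0];
    memcpy(&hi, &u, sizeof hi);
    u = ((uint64_t) row[3] << 32) | row[2];
    memcpy(&lo, &u, sizeof lo);

    printf("hi = %.17g, lo = %.17g\n", hi, lo);
    return 0;
}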
+
+/* cos(pow(2,-p)*pi), sin(pow(2,-p)*pi) */
+static const FFTS_ALIGN(16) unsigned int cos_sin_pi_table[264] = {
+ 0x00000000, 0x3ff00000, 0x54442d18, 0x3df921fb,
+ 0xc9be45de, 0xbbf3bd3c, 0xbb77974f, 0x3a91a390,
+ 0x00000000, 0x3ff00000, 0x54442d18, 0x3e0921fb,
+ 0xc9be45de, 0xbc13bd3c, 0x54a14928, 0x3aa19bd0,
+ 0x00000000, 0x3ff00000, 0x54442d18, 0x3e1921fb,
+ 0xc9be45de, 0xbc33bd3c, 0xb948108a, 0x3ab17cce,
+ 0x00000000, 0x3ff00000, 0x54442d18, 0x3e2921fb,
+ 0xc9be45de, 0xbc53bd3c, 0x4be32e14, 0x3ac100c8,
+ 0x00000000, 0x3ff00000, 0x54442d18, 0x3e3921fb,
+ 0xc9be45de, 0xbc73bd3c, 0x2c9f4879, 0x3ace215d,
+ 0xffffffff, 0x3fefffff, 0x54442d18, 0x3e4921fb,
+ 0x6c837443, 0x3c888586, 0x0005f376, 0x3acd411f,
+ 0xfffffffe, 0x3fefffff, 0x54442d18, 0x3e5921fb,
+ 0x4df22ef1, 0xbc8de9e6, 0x9937209e, 0xbaf7b153,
+ 0xfffffff6, 0x3fefffff, 0x54442d16, 0x3e6921fb,
+ 0x906e88aa, 0x3c70b0cd, 0xfe19968a, 0xbb03b7c0,
+ 0xffffffd9, 0x3fefffff, 0x54442d0e, 0x3e7921fb,
+ 0xdf22ed26, 0xbc8e9e64, 0x8d1b6ffb, 0xbaee8bb4,
+ 0xffffff62, 0x3fefffff, 0x54442cef, 0x3e8921fb,
+ 0x0dd18f0f, 0x3c6619b2, 0x7f2b20fb, 0xbb00e133,
+ 0xfffffd88, 0x3fefffff, 0x54442c73, 0x3e9921fb,
+ 0x0dd314b2, 0x3c8619b2, 0x619fdf6e, 0xbb174e98,
+ 0xfffff621, 0x3fefffff, 0x54442a83, 0x3ea921fb,
+ 0x3764acf5, 0x3c8866c8, 0xf5b2407f, 0xbb388215,
+ 0xffffd886, 0x3fefffff, 0x544422c2, 0x3eb921fb,
+ 0x20e7a944, 0xbc8e64df, 0x7b9b9f23, 0x3b5a0961,
+ 0xffff6216, 0x3fefffff, 0x544403c1, 0x3ec921fb,
+ 0x52ee25ea, 0x3c69b20e, 0x4df6a86a, 0xbb5999d9,
+ 0xfffd8858, 0x3fefffff, 0x544387ba, 0x3ed921fb,
+ 0xd8910ead, 0x3c89b20f, 0x0809d04d, 0x3b77d9db,
+ 0xfff62162, 0x3fefffff, 0x544197a1, 0x3ee921fb,
+ 0x438d3925, 0xbc8937a8, 0xa5d27f7a, 0xbb858b02,
+ 0xffd88586, 0x3fefffff, 0x5439d73a, 0x3ef921fb,
+ 0x94b3ddd2, 0x3c8b22e4, 0xf8a3b73d, 0xbb863c7f,
+ 0xff62161a, 0x3fefffff, 0x541ad59e, 0x3f0921fb,
+ 0x7ea469b2, 0xbc835c13, 0xb8cee262, 0x3bae9860,
+ 0xfd885867, 0x3fefffff, 0x539ecf31, 0x3f1921fb,
+ 0x23a32e63, 0xbc77d556, 0xfcd23a30, 0x3b96b111,
+ 0xf621619c, 0x3fefffff, 0x51aeb57c, 0x3f2921fb,
+ 0xbbbd8fe6, 0xbc87507d, 0x4916c435, 0xbbca6e1d,
+ 0xd8858675, 0x3fefffff, 0x49ee4ea6, 0x3f3921fb,
+ 0x54748eab, 0xbc879f0e, 0x744a453e, 0x3bde894d,
+ 0x62161a34, 0x3fefffff, 0x2aecb360, 0x3f4921fb,
+ 0xb1f9b9c4, 0xbc6136dc, 0x7e566b4c, 0x3be87615,
+ 0x88586ee6, 0x3feffffd, 0xaee6472e, 0x3f5921fa,
+ 0xf173ae5b, 0x3c81af64, 0x284a9df8, 0xbbfee52e,
+ 0x21621d02, 0x3feffff6, 0xbecca4ba, 0x3f6921f8,
+ 0xebc82813, 0xbc76acfc, 0x7bcab5b2, 0x3c02ba40,
+ 0x858e8a92, 0x3fefffd8, 0xfe670071, 0x3f7921f0,
+ 0x1883bcf7, 0x3c8359c7, 0xfe6b7a9b, 0x3bfab967,
+ 0x169b92db, 0x3fefff62, 0xfcdec784, 0x3f8921d1,
+ 0xc81fbd0d, 0x3c85dda3, 0xbe836d9d, 0x3c29878e,
+ 0x6084cd0d, 0x3feffd88, 0xf7a3667e, 0x3f992155,
+ 0x4556e4cb, 0xbc81354d, 0x091a0130, 0xbbfb1d63,
+ 0xe3796d7e, 0x3feff621, 0xf10dd814, 0x3fa91f65,
+ 0x2e24aa15, 0xbc6c57bc, 0x0d569a90, 0xbc2912bd,
+ 0xa3d12526, 0x3fefd88d, 0xbc29b42c, 0x3fb917a6,
+ 0x378811c7, 0xbc887df6, 0xd26ed688, 0xbc3e2718,
+ 0xcff75cb0, 0x3fef6297, 0x3c69a60b, 0x3fc8f8b8,
+ 0x2a361fd3, 0x3c756217, 0xb9ff8d82, 0xbc626d19,
+ 0xcf328d46, 0x3fed906b, 0xa6aea963, 0x3fd87de2,
+ 0x10231ac2, 0x3c7457e6, 0xd3d5a610, 0xbc672ced,
+ 0x667f3bcd, 0x3fe6a09e, 0x667f3bcd, 0x3fe6a09e,
+ 0x13b26456, 0xbc8bdd34, 0x13b26456, 0xbc8bdd34,
+ 0x00000000, 0x00000000, 0x00000000, 0x3ff00000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000
+};
+
+int
+ffts_generate_cosine_sine_32f(ffts_cpx_32f *const table, int table_size)
+{
+ double alpha, beta;
+ double c[2], s[2];
+ double x, z;
+ int i;
+
+ if (!table || !table_size) {
+ return -1;
+ }
+
+ /* the first */
+ table[0][0] = 1.0f;
+ table[0][1] = -0.0f;
+
+ if (FFTS_UNLIKELY(table_size == 1)) {
+ goto exit;
+ }
+
+ if (FFTS_UNLIKELY(table_size == 2)) {
+ /* skip over */
+ i = 1;
+ goto mid_point;
+ }
+
+ /* polynomial approximations calculated using Sollya */
+ x = 1.0 / table_size;
+ z = x * x;
+
+ /* alpha = 2 * sin(M_PI_4 / m) * sin(M_PI_4 / m) */
+ alpha = x * (1.1107207345394952717884501203293686870741139540138 +
+ z * (-0.114191397993514079911985272577099412137126013186879 +
+ z * 3.52164670852685621720746817665316575239342815885835e-3));
+ alpha = alpha * alpha;
+
+ /* beta = sin(M_PI_2 / m) */
+ beta = x * (1.57079632679489455959753740899031981825828552246094 +
+ z * (-0.64596409735041482313988581154262647032737731933593 +
+ z * 7.9690915468332887416913479228242067620158195495605e-2));
+
+ /* cos(0) = 1.0, sin(0) = 0.0 */
+ c[0] = 1.0;
+ s[0] = 0.0;
+
+ /* generate sine and cosine tables with maximum error less than 1 ULP */
+ for (i = 1; i < (table_size + 1)/2; i++) {
+ c[1] = c[0] - ((alpha * c[0]) + (beta * s[0]));
+ s[1] = s[0] - ((alpha * s[0]) - (beta * c[0]));
+
+ table[i + 0][0] = (float) c[1];
+ table[i + 0][1] = (float) -s[1];
+ table[table_size - i][0] = (float) s[1];
+ table[table_size - i][1] = (float) -c[1];
+
+ c[0] = c[1];
+ s[0] = s[1];
+ }
+
+ if (FFTS_UNLIKELY(table_size & 1)) {
+ goto exit;
+ }
+
+mid_point:
+ table[i][0] = 0.70710677f;
+ table[i][1] = -0.70710677f;
+
+exit:
+ return 0;
+}
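
For reference (derivation mine, consistent with the Sollya comments above): with step \(\Delta = \pi / (2\,\texttt{table\_size})\), the polynomials evaluate \(\alpha = 2\sin^2(\Delta/2) = 1 - \cos\Delta\) and \(\beta = \sin\Delta\), so the loop body is the numerically stable rotation recurrence

\[
c_{n+1} = c_n - (\alpha c_n + \beta s_n) = c_n \cos\Delta - s_n \sin\Delta, \qquad
s_{n+1} = s_n - (\alpha s_n - \beta c_n) = s_n \cos\Delta + c_n \sin\Delta,
\]

which steps \((\cos n\Delta, \sin n\Delta)\) forward while only ever adding the small corrections \(\alpha, \beta\), instead of multiplying by \(\cos\Delta \approx 1\) and losing precision to cancellation.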
+
+/* Oscar Buneman's method for generating a sequence of sines and cosines.
+* Expired US Patent 4,878,187 A
+*
+* D. Potts, G. Steidl, M. Tasche, Numerical stability of fast
+* trigonometric transforms - a worst case study,
+* J. Concrete Appl. Math. 1 (2003) 1-36
+*
+* O. Buneman, Stable on-line creation of sines and cosines of
+* successive angles, Proc. IEEE 75, 1434-1435 (1987).
+*/
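
The generators below derive each new table value from two already-computed neighbours via the midpoint identity (spelled out here for reference; the formula is my addition, not the source's):

\[
\cos(x - d) + \cos(x + d) = 2 \cos d \, \cos x
\quad\Longrightarrow\quad
\cos x = \frac{\cos(x - d) + \cos(x + d)}{2 \cos d},
\]

and identically for sine. That is exactly the update w[log_2] = h[log_2] * (w[log_2 + 1] + w[offset]) in the loops below, with h holding the precomputed half-secants 1/(2 cos d); no libm call is needed inside the loop.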
+#if HAVE_SSE2
+int
+ffts_generate_cosine_sine_pow2_32f(ffts_cpx_32f *const table, int table_size)
+{
+ static const __m128d sign_swap = { 0.0, -0.0 };
+ const __m128d *FFTS_RESTRICT ct;
+ const double *FFTS_RESTRICT hs;
+ __m128d FFTS_ALIGN(16) w[32];
+ __m128d FFTS_ALIGN(16) h[32];
+ int i, log_2, offset;
+
+ /* size must be a power of two */
+ if (!table || !table_size || (table_size & (table_size - 1))) {
+ return -1;
+ }
+
+ /* the first */
+ table[0][0] = 1.0f;
+ table[0][1] = -0.0f;
+
+ if (FFTS_UNLIKELY(table_size == 1)) {
+ goto exit;
+ }
+
+ if (FFTS_UNLIKELY(table_size == 2)) {
+ /* skip over */
+ i = 1;
+ goto mid_point;
+ }
+
+ /* calculate table offset */
+ FFTS_ASSUME(table_size/2 > 1);
+ log_2 = ffts_ctzl(table_size);
+ FFTS_ASSUME(log_2 > 1);
+ offset = 32 - log_2;
+ ct = (const __m128d*)
+ FFTS_ASSUME_ALIGNED_32(&cos_sin_pi_table[8 * offset]);
+ hs = (const double*) &half_secant[4 * offset];
+
+ /* initialize from lookup table */
+ for (i = 0; i <= log_2; i++) {
+ w[i] = ct[2*i];
+
+ /* duplicate the high part */
+ h[i] = _mm_set1_pd(hs[2*i]);
+ }
+
+ /* generate sine and cosine tables with maximum error less than 0.5 ULP */
+ for (i = 1; i < table_size/2; i++) {
+ /* calculate trailing zeros in index */
+ log_2 = ffts_ctzl(i);
+
+ /* note: these stores are not 16-byte aligned */
+ _mm_storel_pi((__m64*) &table[i + 0],
+ _mm_cvtpd_ps(_mm_or_pd(w[log_2], sign_swap)));
+ _mm_storel_pi((__m64*) &table[table_size - i], _mm_cvtpd_ps(
+ _mm_or_pd(_mm_shuffle_pd(w[log_2], w[log_2], 1), sign_swap)));
+
+ /* skip and find next trailing zero */
+ offset = (log_2 + 2 + ffts_ctzl(~i >> (log_2 + 2)));
+ w[log_2] = _mm_mul_pd(h[log_2], _mm_add_pd(w[log_2 + 1], w[offset]));
+ }
+
+mid_point:
+ table[i][0] = 0.70710677f;
+ table[i][1] = -0.70710677f;
+
+exit:
+ return 0;
+}
+
+int
+ffts_generate_cosine_sine_pow2_64f(ffts_cpx_64f *const table, int table_size)
+{
+ static const __m128d sign_swap = { 0.0, -0.0 };
+ const struct ffts_dd2_t *FFTS_RESTRICT ct;
+ const double *FFTS_RESTRICT hs;
+ struct ffts_dd2_t FFTS_ALIGN(16) w[32];
+ struct ffts_dd2_t FFTS_ALIGN(16) h[32];
+ struct ffts_dd2_t FFTS_ALIGN(16) sum;
+ int i, log_2, offset;
+
+ /* size must be a power of two */
+ if (!table || !table_size || (table_size & (table_size - 1))) {
+ return -1;
+ }
+
+ /* the first */
+ table[0][0] = 1.0;
+ table[0][1] = -0.0;
+
+ if (FFTS_UNLIKELY(table_size == 1)) {
+ goto exit;
+ }
+
+ if (FFTS_UNLIKELY(table_size == 2)) {
+ /* skip over */
+ i = 1;
+ goto mid_point;
+ }
+
+ /* calculate table offset */
+ FFTS_ASSUME(table_size/2 > 1);
+ log_2 = ffts_ctzl(table_size);
+ FFTS_ASSUME(log_2 > 1);
+ offset = 32 - log_2;
+ ct = (const struct ffts_dd2_t*)
+ FFTS_ASSUME_ALIGNED_32(&cos_sin_pi_table[8 * offset]);
+ hs = (const double*) &half_secant[4 * offset];
+
+ /* initialize from lookup table */
+ for (i = 0; i <= log_2; i++) {
+ w[i] = ct[i];
+
+ /* duplicate the high and low parts */
+ h[i].hi = _mm_set1_pd(hs[2*i + 0]);
+ h[i].lo = _mm_set1_pd(hs[2*i + 1]);
+ }
+
+ /* generate sine and cosine tables with maximum error less than 0.5 ULP */
+ for (i = 1; i < table_size/2; i++) {
+ /* calculate trailing zeros in index */
+ log_2 = ffts_ctzl(i);
+
+ /* result of ffts_dd_mul_dd is normalized */
+ _mm_store_pd((double*) &table[i + 0],
+ _mm_or_pd(w[log_2].hi, sign_swap));
+ _mm_store_pd((double*) &table[table_size - i],
+ _mm_or_pd(_mm_shuffle_pd(w[log_2].hi, w[log_2].hi, 1), sign_swap));
+
+ /* skip and find next trailing zero */
+ offset = (log_2 + 2 + ffts_ctzl(~i >> (log_2 + 2)));
+ sum = ffts_dd2_add_dd2_unnormalized(&w[log_2 + 1], &w[offset]);
+ w[log_2] = ffts_dd2_mul_dd2(&h[log_2], &sum);
+ }
+
+mid_point:
+ table[i][0] = 0.707106781186547524;
+ table[i][1] = -0.707106781186547524;
+
+exit:
+ return 0;
+}
+#else
+int
+ffts_generate_cosine_sine_pow2_32f(ffts_cpx_32f *const table, int table_size)
+{
+ const ffts_cpx_64f *FFTS_RESTRICT ct;
+ const double *FFTS_RESTRICT hs;
+ ffts_cpx_64f FFTS_ALIGN(16) w[32];
+ int i, log_2, offset;
+
+ /* size must be a power of two */
+ if (!table || !table_size || (table_size & (table_size - 1))) {
+ return -1;
+ }
+
+ /* the first */
+ table[0][0] = 1.0f;
+ table[0][1] = -0.0f;
+
+ if (FFTS_UNLIKELY(table_size == 1)) {
+ goto exit;
+ }
+
+ if (FFTS_UNLIKELY(table_size == 2)) {
+ /* skip over */
+ i = 1;
+ goto mid_point;
+ }
+
+ /* calculate table offset */
+ FFTS_ASSUME(table_size/2 > 1);
+ log_2 = ffts_ctzl(table_size);
+ FFTS_ASSUME(log_2 > 1);
+ offset = 32 - log_2;
+ ct = (const ffts_cpx_64f*)
+ FFTS_ASSUME_ALIGNED_32(&cos_sin_pi_table[8 * offset]);
+ hs = (const double*) &half_secant[4 * offset];
+
+ /* initialize from lookup table */
+ for (i = 0; i <= log_2; i++) {
+ w[i][0] = ct[2*i][0];
+ w[i][1] = ct[2*i][1];
+ }
+
+ /* generate sine and cosine tables with maximum error less than 0.5 ULP */
+ for (i = 1; i < table_size/2; i++) {
+ /* calculate trailing zeros in index */
+ log_2 = ffts_ctzl(i);
+
+ table[i + 0][0] = (float) w[log_2][0];
+ table[i + 0][1] = (float) -w[log_2][1];
+ table[table_size - i][0] = (float) w[log_2][1];
+ table[table_size - i][1] = (float) -w[log_2][0];
+
+ /* skip and find next trailing zero */
+ offset = (log_2 + 2 + ffts_ctzl(~i >> (log_2 + 2)));
+ w[log_2][0] = hs[2 * log_2] * (w[log_2 + 1][0] + w[offset][0]);
+ w[log_2][1] = hs[2 * log_2] * (w[log_2 + 1][1] + w[offset][1]);
+ }
+
+mid_point:
+ table[i][0] = 0.70710677f;
+ table[i][1] = -0.70710677f;
+
+exit:
+ return 0;
+}
+
+int
+ffts_generate_cosine_sine_pow2_64f(ffts_cpx_64f *const table, int table_size)
+{
+ const struct ffts_dd_t *FFTS_RESTRICT ct;
+ const struct ffts_dd_t *FFTS_RESTRICT hs;
+ struct ffts_dd_t FFTS_ALIGN(16) w[32][2];
+ int i, log_2, offset;
+
+ /* size must be a power of two */
+ if (!table || !table_size || (table_size & (table_size - 1))) {
+ return -1;
+ }
+
+ /* the first */
+ table[0][0] = 1.0;
+ table[0][1] = -0.0;
+
+ if (FFTS_UNLIKELY(table_size == 1)) {
+ goto exit;
+ }
+
+ if (FFTS_UNLIKELY(table_size == 2)) {
+ /* skip over */
+ i = 1;
+ goto mid_point;
+ }
+
+ /* calculate table offset */
+ FFTS_ASSUME(table_size/2 > 1);
+ log_2 = ffts_ctzl(table_size);
+ FFTS_ASSUME(log_2 > 1);
+ offset = 32 - log_2;
+ ct = (const struct ffts_dd_t*)
+ FFTS_ASSUME_ALIGNED_32(&cos_sin_pi_table[8 * offset]);
+ hs = (const struct ffts_dd_t*) &half_secant[4 * offset];
+
+ /* initialize from lookup table */
+ for (i = 0; i <= log_2; i++) {
+ w[i][0].hi = ct[2*i + 0].hi;
+ w[i][0].lo = ct[2*i + 1].hi;
+ w[i][1].hi = ct[2*i + 0].lo;
+ w[i][1].lo = ct[2*i + 1].lo;
+ }
+
+ /* generate sine and cosine tables with maximum error less than 0.5 ULP */
+ for (i = 1; i < table_size/2; i++) {
+ /* calculate trailing zeros in index */
+ log_2 = ffts_ctzl(i);
+
+ /* result of ffts_dd_mul_dd is normalized */
+ table[i + 0][0] = w[log_2][0].hi;
+ table[i + 0][1] = -w[log_2][1].hi;
+ table[table_size - i][0] = w[log_2][1].hi;
+ table[table_size - i][1] = -w[log_2][0].hi;
+
+ /* skip and find next trailing zero */
+ offset = (log_2 + 2 + ffts_ctzl(~i >> (log_2 + 2)));
+ w[log_2][0] = ffts_dd_mul_dd(hs[log_2],
+ ffts_dd_add_dd_unnormalized(w[log_2 + 1][0], w[offset][0]));
+ w[log_2][1] = ffts_dd_mul_dd(hs[log_2],
+ ffts_dd_add_dd_unnormalized(w[log_2 + 1][1], w[offset][1]));
+ }
+
+mid_point:
+ table[i][0] = 0.707106781186547524;
+ table[i][1] = -0.707106781186547524;
+
+exit:
+ return 0;
+}
+#endif
+
+int
+ffts_generate_table_1d_real_32f(struct _ffts_plan_t *const p,
+ int sign,
+ int invert)
+{
+ const ffts_cpx_64f *FFTS_RESTRICT ct;
+ const double *FFTS_RESTRICT hs;
+ ffts_cpx_64f FFTS_ALIGN(16) w[32];
+ int i, log_2, offset, N;
+ float *A, *B;
+
+ if (!p) {
+ return -1;
+ }
+
+ A = (float*) FFTS_ASSUME_ALIGNED_32(p->A);
+ B = (float*) FFTS_ASSUME_ALIGNED_32(p->B);
+ N = (int) p->N;
+
+ /* the first */
+ if (sign < 0) {
+ A[0] = 0.5f;
+ A[1] = -0.5f;
+ B[0] = invert ? -0.5f : 0.5f;
+ B[1] = 0.5f;
+ } else {
+ /* peel off the first */
+ A[0] = 1.0f;
+ A[1] = invert ? 1.0f : -1.0f;
+ B[0] = 1.0f;
+ B[1] = 1.0f;
+ }
+
+ if (FFTS_UNLIKELY(N == 4)) {
+ i = 1;
+ goto last;
+ }
+
+ /* calculate table offset */
+ FFTS_ASSUME(N / 4 > 1);
+ log_2 = ffts_ctzl(N);
+ FFTS_ASSUME(log_2 > 2);
+ offset = 34 - log_2;
+ ct = (const ffts_cpx_64f*)
+ FFTS_ASSUME_ALIGNED_32(&cos_sin_pi_table[8 * offset]);
+ hs = (const double*) &half_secant[4 * offset];
+
+ /* initialize from lookup table */
+ for (i = 0; i <= log_2; i++) {
+ w[i][0] = ct[2*i][0];
+ w[i][1] = ct[2*i][1];
+ }
+
+ /* generate sine and cosine tables with maximum error less than 0.5 ULP */
+ if (sign < 0) {
+ for (i = 1; i < N/4; i++) {
+ float t0, t1, t2;
+
+ /* calculate trailing zeros in index */
+ log_2 = ffts_ctzl(i);
+
+ t0 = (float) (0.5 * (1.0 - w[log_2][1]));
+ t1 = (float) (0.5 * w[log_2][0]);
+ t2 = (float) (0.5 * (1.0 + w[log_2][1]));
+
+ A[ 2 * i + 0] = t0;
+ A[N - 2 * i + 0] = t0;
+ A[ 2 * i + 1] = -t1;
+ A[N - 2 * i + 1] = t1;
+
+ B[ 2 * i + 0] = invert ? -t2 : t2;
+ B[N - 2 * i + 0] = invert ? -t2 : t2;
+ B[ 2 * i + 1] = t1;
+ B[N - 2 * i + 1] = -t1;
+
+ /* skip and find next trailing zero */
+ offset = (log_2 + 2 + ffts_ctzl(~i >> (log_2 + 2)));
+ w[log_2][0] = hs[2 * log_2] * (w[log_2 + 1][0] + w[offset][0]);
+ w[log_2][1] = hs[2 * log_2] * (w[log_2 + 1][1] + w[offset][1]);
+ }
+ } else {
+ for (i = 1; i < N/4; i++) {
+ float t0, t1, t2;
+
+ /* calculate trailing zeros in index */
+ log_2 = ffts_ctzl(i);
+
+ t0 = (float) (1.0 - w[log_2][1]);
+ t1 = (float) w[log_2][0];
+ t2 = (float) (1.0 + w[log_2][1]);
+
+ A[ 2 * i + 0] = t0;
+ A[N - 2 * i + 0] = t0;
+ A[ 2 * i + 1] = invert ? t1 : -t1;
+ A[N - 2 * i + 1] = invert ? -t1 : t1;
+
+ B[ 2 * i + 0] = t2;
+ B[N - 2 * i + 0] = t2;
+ B[ 2 * i + 1] = t1;
+ B[N - 2 * i + 1] = -t1;
+
+ /* skip and find next trailing zero */
+ offset = (log_2 + 2 + ffts_ctzl(~i >> (log_2 + 2)));
+ w[log_2][0] = hs[2 * log_2] * (w[log_2 + 1][0] + w[offset][0]);
+ w[log_2][1] = hs[2 * log_2] * (w[log_2 + 1][1] + w[offset][1]);
+ }
+ }
+
+last:
+ if (sign < 0) {
+ A[2 * i + 0] = 0.0f;
+ A[2 * i + 1] = 0.0f;
+ B[2 * i + 0] = invert ? -1.0f : 1.0f;
+ B[2 * i + 1] = 0.0f;
+ } else {
+ A[2 * i + 0] = 0.0f;
+ A[2 * i + 1] = 0.0f;
+ B[2 * i + 0] = 2.0f;
+ B[2 * i + 1] = 0.0f;
+ }
+
+ return 0;
+}
\ No newline at end of file
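
The pow2 generators fill a quarter-wave table: by my reading, table[i] ends up as (cos t, -sin t) with t = i*pi/(2N) for i = 0..N-1. A standalone check against libm (sketch of mine):

#include <math.h>
#include <stdio.h>
#include "ffts_trig.h"

int main(void)
{
    enum { N = 64 };
    static const double PI = 3.14159265358979323846;
    ffts_cpx_32f table[N];
    double max_err = 0.0;
    int i;

    if (ffts_generate_cosine_sine_pow2_32f(table, N) != 0)
        return 1;

    for (i = 0; i < N; i++) {
        double t  = (double) i * PI / (2.0 * N);
        double e0 = fabs(table[i][0] - cos(t));
        double e1 = fabs(table[i][1] + sin(t)); /* table holds -sin */
        if (e0 > max_err) max_err = e0;
        if (e1 > max_err) max_err = e1;
    }

    printf("max abs error vs libm: %g\n", max_err); /* ~1e-8 or below */
    return 0;
}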
diff --git a/lib/ffts/src/ffts_trig.h b/lib/ffts/src/ffts_trig.h
new file mode 100644
index 0000000..0b22738
--- /dev/null
+++ b/lib/ffts/src/ffts_trig.h
@@ -0,0 +1,56 @@
+/*
+
+This file is part of FFTS -- The Fastest Fourier Transform in the South
+
+Copyright (c) 2015, Jukka Ojanen <jukka.ojanen@kolumbus.fi>
+
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+* Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+* Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in the
+documentation and/or other materials provided with the distribution.
+* Neither the name of the organization nor the
+names of its contributors may be used to endorse or promote products
+derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL ANTHONY M. BLAKE BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*/
+
+#ifndef FFTS_TRIG_H
+#define FFTS_TRIG_H
+
+#if defined (_MSC_VER) && (_MSC_VER >= 1020)
+#pragma once
+#endif
+
+#include "ffts_internal.h"
+
+int
+ffts_generate_cosine_sine_32f(ffts_cpx_32f *const table, int table_size);
+
+int
+ffts_generate_cosine_sine_pow2_32f(ffts_cpx_32f *const table, int table_size);
+
+int
+ffts_generate_cosine_sine_pow2_64f(ffts_cpx_64f *const table, int table_size);
+
+int
+ffts_generate_table_1d_real_32f(struct _ffts_plan_t *const p,
+ int sign,
+ int invert);
+
+#endif /* FFTS_TRIG_H */
diff --git a/lib/ffts/src/macros-alpha.h b/lib/ffts/src/macros-alpha.h
index 06daf4a..f7795d4 100644
--- a/lib/ffts/src/macros-alpha.h
+++ b/lib/ffts/src/macros-alpha.h
@@ -1,206 +1,264 @@
/*
-
- This file is part of FFTS -- The Fastest Fourier Transform in the South
-
- Copyright (c) 2013, Michael J. Cree <mcree@orcon.net.nz>
- Copyright (c) 2012, 2013, Anthony M. Blake <amb@anthonix.com>
-
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions are met:
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in the
- documentation and/or other materials provided with the distribution.
- * Neither the name of the organization nor the
- names of its contributors may be used to endorse or promote products
- derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
- ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- DISCLAIMED. IN NO EVENT SHALL ANTHONY M. BLAKE BE LIABLE FOR ANY
- DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
+This file is part of FFTS -- The Fastest Fourier Transform in the South
+
+Copyright (c) 2013, Michael J. Cree <mcree@orcon.net.nz>
+Copyright (c) 2012, 2013, Anthony M. Blake <amb@anthonix.com>
+
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+* Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+* Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in the
+documentation and/or other materials provided with the distribution.
+* Neither the name of the organization nor the
+names of its contributors may be used to endorse or promote products
+derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL ANTHONY M. BLAKE BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#ifndef __MACROS_ALPHA_H__
-#define __MACROS_ALPHA_H__
+*/
-#include <math.h>
+#ifndef FFTS_MACROS_ALPHA_H
+#define FFTS_MACROS_ALPHA_H
-#ifdef __alpha__
-#define restrict
+#if defined (_MSC_VER) && (_MSC_VER >= 1020)
+#pragma once
#endif
-typedef struct {float r1, i1, r2, i2;} V;
+#include "ffts_attributes.h"
-#define FFTS_MALLOC(d,a) malloc(d)
-#define FFTS_FREE(d) free(d)
+#ifdef HAVE_STRING_H
+#include <string.h>
+#endif
-#define VLIT4(f3,f2,f1,f0) ((V){f0,f1,f2,f3})
+#ifdef HAVE_STDLIB_H
+#include <stdlib.h>
+#endif
-static inline V VADD(V x, V y)
+typedef union {
+ struct {
+ float r1;
+ float i1;
+ float r2;
+ float i2;
+ } r;
+ uint32_t u[4];
+} V4SF;
+
+#define FFTS_MALLOC(d,a) (malloc(d))
+#define FFTS_FREE(d) (free(d))
+
+static FFTS_ALWAYS_INLINE V4SF
+V4SF_LIT4(float f3, float f2, float f1, float f0)
{
- V z;
- z.r1 = x.r1 + y.r1;
- z.i1 = x.i1 + y.i1;
- z.r2 = x.r2 + y.r2;
- z.i2 = x.i2 + y.i2;
+ V4SF z;
+
+ z.r.r1 = f0;
+ z.r.i1 = f1;
+ z.r.r2 = f2;
+ z.r.i2 = f3;
+
return z;
}
-
-static inline V VSUB(V x, V y)
+static FFTS_ALWAYS_INLINE V4SF
+V4SF_ADD(V4SF x, V4SF y)
{
- V z;
- z.r1 = x.r1 - y.r1;
- z.i1 = x.i1 - y.i1;
- z.r2 = x.r2 - y.r2;
- z.i2 = x.i2 - y.i2;
+ V4SF z;
+
+ z.r.r1 = x.r.r1 + y.r.r1;
+ z.r.i1 = x.r.i1 + y.r.i1;
+ z.r.r2 = x.r.r2 + y.r.r2;
+ z.r.i2 = x.r.i2 + y.r.i2;
+
return z;
}
-
-static inline V VMUL(V x, V y)
+static FFTS_ALWAYS_INLINE V4SF
+V4SF_SUB(V4SF x, V4SF y)
{
- V z;
- z.r1 = x.r1 * y.r1;
- z.i1 = x.i1 * y.i1;
- z.r2 = x.r2 * y.r2;
- z.i2 = x.i2 * y.i2;
+ V4SF z;
+
+ z.r.r1 = x.r.r1 - y.r.r1;
+ z.r.i1 = x.r.i1 - y.r.i1;
+ z.r.r2 = x.r.r2 - y.r.r2;
+ z.r.i2 = x.r.i2 - y.r.i2;
+
return z;
}
-static inline V VXOR(V x, V y)
+static FFTS_ALWAYS_INLINE V4SF
+V4SF_MUL(V4SF x, V4SF y)
{
- V r;
- r.r1 = (uint32_t)x.r1 ^ (uint32_t)y.r1;
- r.i1 = (uint32_t)x.i1 ^ (uint32_t)y.i1;
- r.r2 = (uint32_t)x.r2 ^ (uint32_t)y.r2;
- r.i2 = (uint32_t)x.i2 ^ (uint32_t)y.i2;
- return r;
+ V4SF z;
+
+ z.r.r1 = x.r.r1 * y.r.r1;
+ z.r.i1 = x.r.i1 * y.r.i1;
+ z.r.r2 = x.r.r2 * y.r.r2;
+ z.r.i2 = x.r.i2 * y.r.i2;
+
+ return z;
}
-static inline V VSWAPPAIRS(V x)
+static FFTS_ALWAYS_INLINE V4SF
+V4SF_XOR(V4SF x, V4SF y)
{
- V z;
- z.r1 = x.i1;
- z.i1 = x.r1;
- z.r2 = x.i2;
- z.i2 = x.r2;
+ V4SF z;
+
+ z.u[0] = x.u[0] ^ y.u[0];
+ z.u[1] = x.u[1] ^ y.u[1];
+ z.u[2] = x.u[2] ^ y.u[2];
+ z.u[3] = x.u[3] ^ y.u[3];
+
return z;
}
-
-static inline V VBLEND(V x, V y)
+static FFTS_ALWAYS_INLINE V4SF
+V4SF_SWAP_PAIRS(V4SF x)
{
- V z;
- z.r1 = x.r1;
- z.i1 = x.i1;
- z.r2 = y.r2;
- z.i2 = y.i2;
+ V4SF z;
+
+ z.r.r1 = x.r.i1;
+ z.r.i1 = x.r.r1;
+ z.r.r2 = x.r.i2;
+ z.r.i2 = x.r.r2;
+
return z;
}
-static inline V VUNPACKHI(V x, V y)
+static FFTS_ALWAYS_INLINE V4SF
+V4SF_BLEND(V4SF x, V4SF y)
{
- V z;
- z.r1 = x.r2;
- z.i1 = x.i2;
- z.r2 = y.r2;
- z.i2 = y.i2;
+ V4SF z;
+
+ z.r.r1 = x.r.r1;
+ z.r.i1 = x.r.i1;
+ z.r.r2 = y.r.r2;
+ z.r.i2 = y.r.i2;
+
return z;
}
-static inline V VUNPACKLO(V x, V y)
+static FFTS_ALWAYS_INLINE V4SF
+V4SF_UNPACK_HI(V4SF x, V4SF y)
{
- V z;
- z.r1 = x.r1;
- z.i1 = x.i1;
- z.r2 = y.r1;
- z.i2 = y.i1;
+ V4SF z;
+
+ z.r.r1 = x.r.r2;
+ z.r.i1 = x.r.i2;
+ z.r.r2 = y.r.r2;
+ z.r.i2 = y.r.i2;
+
return z;
}
-static inline V VDUPRE(V x)
+static FFTS_ALWAYS_INLINE V4SF
+V4SF_UNPACK_LO(V4SF x, V4SF y)
{
- V z;
- z.r1 = x.r1;
- z.i1 = x.r1;
- z.r2 = x.r2;
- z.i2 = x.r2;
+ V4SF z;
+
+ z.r.r1 = x.r.r1;
+ z.r.i1 = x.r.i1;
+ z.r.r2 = y.r.r1;
+ z.r.i2 = y.r.i1;
+
return z;
}
-static inline V VDUPIM(V x)
+static FFTS_ALWAYS_INLINE V4SF
+V4SF_DUPLICATE_RE(V4SF x)
{
- V z;
- z.r1 = x.i1;
- z.i1 = x.i1;
- z.r2 = x.i2;
- z.i2 = x.i2;
+ V4SF z;
+
+ z.r.r1 = x.r.r1;
+ z.r.i1 = x.r.r1;
+ z.r.r2 = x.r.r2;
+ z.r.i2 = x.r.r2;
+
return z;
}
-static inline V IMUL(V d, V re, V im)
+static FFTS_ALWAYS_INLINE V4SF
+V4SF_DUPLICATE_IM(V4SF x)
{
- re = VMUL(re, d);
- im = VMUL(im, VSWAPPAIRS(d));
- return VSUB(re, im);
+ V4SF z;
+
+ z.r.r1 = x.r.i1;
+ z.r.i1 = x.r.i1;
+ z.r.r2 = x.r.i2;
+ z.r.i2 = x.r.i2;
+
+ return z;
}
+static FFTS_ALWAYS_INLINE V4SF
+V4SF_IMUL(V4SF d, V4SF re, V4SF im)
+{
+ re = V4SF_MUL(re, d);
+ im = V4SF_MUL(im, V4SF_SWAP_PAIRS(d));
+ return V4SF_SUB(re, im);
+}
-static inline V IMULJ(V d, V re, V im)
+static FFTS_ALWAYS_INLINE V4SF
+V4SF_IMULJ(V4SF d, V4SF re, V4SF im)
{
- re = VMUL(re, d);
- im = VMUL(im, VSWAPPAIRS(d));
- return VADD(re, im);
+ re = V4SF_MUL(re, d);
+ im = V4SF_MUL(im, V4SF_SWAP_PAIRS(d));
+ return V4SF_ADD(re, im);
}
-static inline V MULI(int inv, V x)
+static FFTS_ALWAYS_INLINE V4SF
+V4SF_MULI(int inv, V4SF x)
{
- V z;
+ V4SF z;
if (inv) {
- z.r1 = -x.r1;
- z.i1 = x.i1;
- z.r2 = -x.r2;
- z.i2 = x.i2;
- }else{
- z.r1 = x.r1;
- z.i1 = -x.i1;
- z.r2 = x.r2;
- z.i2 = -x.i2;
+ z.r.r1 = -x.r.r1;
+ z.r.i1 = x.r.i1;
+ z.r.r2 = -x.r.r2;
+ z.r.i2 = x.r.i2;
+ } else {
+ z.r.r1 = x.r.r1;
+ z.r.i1 = -x.r.i1;
+ z.r.r2 = x.r.r2;
+ z.r.i2 = -x.r.i2;
}
+
return z;
}
-
-static inline V IMULI(int inv, V x)
+static FFTS_ALWAYS_INLINE V4SF
+V4SF_IMULI(int inv, V4SF x)
{
- return VSWAPPAIRS(MULI(inv, x));
+ return V4SF_SWAP_PAIRS(V4SF_MULI(inv, x));
}
-
-static inline V VLD(const void *s)
+static FFTS_ALWAYS_INLINE V4SF
+V4SF_LD(const void *s)
{
- V *d = (V *)s;
- return *d;
+ V4SF z;
+ memcpy(&z, s, sizeof(z));
+ return z;
}
-
-static inline void VST(void *d, V s)
+static FFTS_ALWAYS_INLINE void
+V4SF_ST(void *d, V4SF s)
{
- V *r = (V *)d;
+ V4SF *r = (V4SF*) d;
*r = s;
}
-#endif
+#endif /* FFTS_MACROS_ALPHA_H */
\ No newline at end of file
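The rewritten portable path above fixes a genuine bug: the old VXOR cast floats to uint32_t, which converts the numeric value rather than reinterpreting its bits, so sign-mask tricks silently produced garbage. Keeping the lanes in a union with a uint32_t view, and loading through memcpy in V4SF_LD, gives well-defined bitwise access and sidesteps strict-aliasing and alignment pitfalls. As a sanity check, here is a minimal standalone sketch (not part of the patch; the sample values are made up) of the per-lane arithmetic V4SF_IMUL performs for one complex pair:

/* Standalone sketch: what V4SF_IMUL computes for one complex pair
 * (dr, di) against duplicated twiddle components (re, re), (im, im). */
#include <stdio.h>

int main(void)
{
    float dr = 1.0f, di = 2.0f;   /* one complex input value       */
    float re = 3.0f, im = 4.0f;   /* duplicated twiddle components */

    /* re' = re * d;  im' = im * swap_pairs(d);  result = re' - im' */
    float out_r = re * dr - im * di;
    float out_i = re * di - im * dr;

    printf("V4SF_IMUL lanes: (%g, %g)\n", out_r, out_i);
    return 0;
}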
diff --git a/lib/ffts/src/macros-altivec.h b/lib/ffts/src/macros-altivec.h
index 0d148a5..28f552f 100644
--- a/lib/ffts/src/macros-altivec.h
+++ b/lib/ffts/src/macros-altivec.h
@@ -135,3 +135,4 @@ static inline void VST(void *d, V s)
*r = s;
}
#endif
+// vim: set autoindent noexpandtab tabstop=3 shiftwidth=3:
diff --git a/lib/ffts/src/macros-neon.h b/lib/ffts/src/macros-neon.h
index 0750b75..29aa49f 100644
--- a/lib/ffts/src/macros-neon.h
+++ b/lib/ffts/src/macros-neon.h
@@ -1,96 +1,119 @@
/*
-
- This file is part of FFTS -- The Fastest Fourier Transform in the South
-
- Copyright (c) 2012, 2013, Anthony M. Blake <amb@anthonix.com>
-
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions are met:
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in the
- documentation and/or other materials provided with the distribution.
- * Neither the name of the organization nor the
- names of its contributors may be used to endorse or promote products
- derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
- ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- DISCLAIMED. IN NO EVENT SHALL ANTHONY M. BLAKE BE LIABLE FOR ANY
- DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+This file is part of FFTS -- The Fastest Fourier Transform in the South
+
+Copyright (c) 2012, 2013, Anthony M. Blake <amb@anthonix.com>
+
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+* Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+* Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in the
+documentation and/or other materials provided with the distribution.
+* Neither the name of the organization nor the
+names of its contributors may be used to endorse or promote products
+derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL ANTHONY M. BLAKE BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef __MACROS_NEON_H__
-#define __MACROS_NEON_H__
-#include "neon.h"
+#ifndef FFTS_MACROS_NEON_H
+#define FFTS_MACROS_NEON_H
+
#include <arm_neon.h>
-typedef float32x4_t V;
+#ifdef HAVE_STDLIB_H
+#include <stdlib.h>
+#endif
-typedef float32x4x2_t VS;
+#define FFTS_MALLOC(d,a) (valloc(d))
+#define FFTS_FREE(d) (free(d))
-#define ADD vaddq_f32
-#define SUB vsubq_f32
-#define MUL vmulq_f32
-#define VADD vaddq_f32
-#define VSUB vsubq_f32
-#define VMUL vmulq_f32
-#define VXOR(x,y) (vreinterpretq_f32_u32(veorq_u32(vreinterpretq_u32_f32(x), vreinterpretq_u32_f32(y))))
-#define VST vst1q_f32
-#define VLD vld1q_f32
-#define VST2 vst2q_f32
-#define VLD2 vld2q_f32
+typedef float32x4_t V4SF;
+typedef float32x4x2_t V4SF2;
-#define VSWAPPAIRS(x) (vrev64q_f32(x))
+#define V4SF_ADD vaddq_f32
+#define V4SF_SUB vsubq_f32
+#define V4SF_MUL vmulq_f32
-#define VUNPACKHI(a,b) (vcombine_f32(vget_high_f32(a), vget_high_f32(b)))
-#define VUNPACKLO(a,b) (vcombine_f32(vget_low_f32(a), vget_low_f32(b)))
+#define V4SF_XOR(x,y) \
+ (vreinterpretq_f32_u32(veorq_u32(vreinterpretq_u32_f32(x), vreinterpretq_u32_f32(y))))
-#define VBLEND(x,y) (vcombine_f32(vget_low_f32(x), vget_high_f32(y)))
+#define V4SF_ST vst1q_f32
+#define V4SF_LD vld1q_f32
-__INLINE V VLIT4(data_t f3, data_t f2, data_t f1, data_t f0) {
- data_t __attribute__ ((aligned(16))) d[4] = {f0, f1, f2, f3};
- return VLD(d);
-}
+#define V4SF_SWAP_PAIRS(x) \
+ (vrev64q_f32(x))
-#define VDUPRE(r) vcombine_f32(vdup_lane_f32(vget_low_f32(r),0), vdup_lane_f32(vget_high_f32(r),0))
-#define VDUPIM(r) vcombine_f32(vdup_lane_f32(vget_low_f32(r),1), vdup_lane_f32(vget_high_f32(r),1))
+#define V4SF_UNPACK_HI(a,b) \
+ (vcombine_f32(vget_high_f32(a), vget_high_f32(b)))
-#define FFTS_MALLOC(d,a) (valloc(d))
-#define FFTS_FREE(d) (free(d))
+#define V4SF_UNPACK_LO(a,b) \
+ (vcombine_f32(vget_low_f32(a), vget_low_f32(b)))
-__INLINE void STORESPR(data_t * addr, VS p) {
+#define V4SF_BLEND(x,y) \
+ (vcombine_f32(vget_low_f32(x), vget_high_f32(y)))
- vst1q_f32(addr, p.val[0]);
- vst1q_f32(addr + 4, p.val[1]);
-
+static FFTS_ALWAYS_INLINE V4SF
+V4SF_LIT4(float f3, float f2, float f1, float f0)
+{
+ float FFTS_ALIGN(16) d[4] = {f0, f1, f2, f3};
+ return V4SF_LD(d);
}
-__INLINE V IMULI(int inv, V a) {
- if(inv) return VSWAPPAIRS(VXOR(a, VLIT4(0.0f, -0.0f, 0.0f, -0.0f)));
- else return VSWAPPAIRS(VXOR(a, VLIT4(-0.0f, 0.0f, -0.0f, 0.0f)));
+#define V4SF_DUPLICATE_RE(r) \
+ vcombine_f32(vdup_lane_f32(vget_low_f32(r),0), vdup_lane_f32(vget_high_f32(r),0))
+
+#define V4SF_DUPLICATE_IM(r) \
+ vcombine_f32(vdup_lane_f32(vget_low_f32(r),1), vdup_lane_f32(vget_high_f32(r),1))
+
+static FFTS_ALWAYS_INLINE V4SF
+V4SF_IMULI(int inv, V4SF a)
+{
+ if (inv) {
+ return V4SF_SWAP_PAIRS(V4SF_XOR(a, V4SF_LIT4(0.0f, -0.0f, 0.0f, -0.0f)));
+ } else {
+ return V4SF_SWAP_PAIRS(V4SF_XOR(a, V4SF_LIT4(-0.0f, 0.0f, -0.0f, 0.0f)));
+ }
}
-__INLINE V IMUL(V d, V re, V im) {
- re = VMUL(re, d);
- im = VMUL(im, VSWAPPAIRS(d));
- return VSUB(re, im);
+static FFTS_ALWAYS_INLINE V4SF
+V4SF_IMUL(V4SF d, V4SF re, V4SF im)
+{
+ re = V4SF_MUL(re, d);
+ im = V4SF_MUL(im, V4SF_SWAP_PAIRS(d));
+ return V4SF_SUB(re, im);
}
-__INLINE V IMULJ(V d, V re, V im) {
- re = VMUL(re, d);
- im = VMUL(im, VSWAPPAIRS(d));
- return VADD(re, im);
+static FFTS_ALWAYS_INLINE V4SF
+V4SF_IMULJ(V4SF d, V4SF re, V4SF im)
+{
+ re = V4SF_MUL(re, d);
+ im = V4SF_MUL(im, V4SF_SWAP_PAIRS(d));
+ return V4SF_ADD(re, im);
}
-#endif
+#define V4SF2_ST vst2q_f32
+#define V4SF2_LD vld2q_f32
+
+static FFTS_ALWAYS_INLINE void
+V4SF2_STORE_SPR(float *addr, V4SF2 p)
+{
+ vst1q_f32(addr, p.val[0]);
+ vst1q_f32(addr + 4, p.val[1]);
+}
+
+#endif /* FFTS_MACROS_NEON_H */
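V4SF_IMULI here multiplies each (re, im) pair by +/-i without any floating-point arithmetic: an XOR against a -0.0f sign mask flips the chosen lanes' sign bits (veorq_u32 on reinterpreted floats), then vrev64q_f32 swaps each pair. Note that FFTS_MALLOC still maps to the obsolescent valloc on this path; posix_memalign would be the modern equivalent, but that is outside this patch. A standalone scalar sketch of the trick, assuming the usual interleaved (re, im) layout:

/* Sketch of the V4SF_IMULI sign-mask trick. For the inv branch the
 * mask hits the real lanes, so z = (re, im) becomes (im, -re). */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static float flip_sign(float x)
{
    /* XOR with -0.0f == toggle the IEEE-754 sign bit */
    uint32_t u;
    memcpy(&u, &x, sizeof u);
    u ^= UINT32_C(0x80000000);
    memcpy(&x, &u, sizeof x);
    return x;
}

int main(void)
{
    float re = 1.0f, im = 2.0f;
    /* inv branch: flip the real lane, then swap the pair */
    printf("(%g, %g)\n", im, flip_sign(re));
    return 0;
}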
diff --git a/lib/ffts/src/macros-sse.h b/lib/ffts/src/macros-sse.h
index 229477c..827aa67 100644
--- a/lib/ffts/src/macros-sse.h
+++ b/lib/ffts/src/macros-sse.h
@@ -1,84 +1,100 @@
/*
-
- This file is part of FFTS -- The Fastest Fourier Transform in the South
-
- Copyright (c) 2012, Anthony M. Blake <amb@anthonix.com>
- Copyright (c) 2012, The University of Waikato
-
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions are met:
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in the
- documentation and/or other materials provided with the distribution.
- * Neither the name of the organization nor the
- names of its contributors may be used to endorse or promote products
- derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
- ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- DISCLAIMED. IN NO EVENT SHALL ANTHONY M. BLAKE BE LIABLE FOR ANY
- DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+This file is part of FFTS -- The Fastest Fourier Transform in the South
+
+Copyright (c) 2012, Anthony M. Blake <amb@anthonix.com>
+Copyright (c) 2012, The University of Waikato
+
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+* Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+* Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in the
+documentation and/or other materials provided with the distribution.
+* Neither the name of the organization nor the
+names of its contributors may be used to endorse or promote products
+derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL ANTHONY M. BLAKE BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef __SSE_FLOAT_H__
-#define __SSE_FLOAT_H__
+#ifndef FFTS_MACROS_SSE_H
+#define FFTS_MACROS_SSE_H
+
+#if defined (_MSC_VER) && (_MSC_VER >= 1020)
+#pragma once
+#endif
#include <xmmintrin.h>
-//#define VL 4
+#define FFTS_MALLOC(d,a) (_mm_malloc(d,a))
+#define FFTS_FREE(d) (_mm_free(d))
-typedef __m128 V;
+typedef __m128 V4SF;
-#define VADD _mm_add_ps
-#define VSUB _mm_sub_ps
-#define VMUL _mm_mul_ps
-//#define VLIT4 _mm_set_ps
-#define VXOR _mm_xor_ps
-#define VST _mm_store_ps
-#define VLD _mm_load_ps
+#define V4SF_ADD _mm_add_ps
+#define V4SF_SUB _mm_sub_ps
+#define V4SF_MUL _mm_mul_ps
+#define V4SF_LIT4 _mm_set_ps
+#define V4SF_XOR _mm_xor_ps
+#define V4SF_ST _mm_store_ps
+#define V4SF_LD _mm_load_ps
-#define VSWAPPAIRS(x) (_mm_shuffle_ps(x,x,_MM_SHUFFLE(2,3,0,1)))
+#define V4SF_SWAP_PAIRS(x) \
+ (_mm_shuffle_ps(x, x, _MM_SHUFFLE(2,3,0,1)))
-#define VUNPACKHI(x,y) (_mm_shuffle_ps(x,y,_MM_SHUFFLE(3,2,3,2)))
-#define VUNPACKLO(x,y) (_mm_shuffle_ps(x,y,_MM_SHUFFLE(1,0,1,0)))
+#define V4SF_UNPACK_HI(x,y) \
+ (_mm_shuffle_ps(x, y, _MM_SHUFFLE(3,2,3,2)))
-#define VBLEND(x,y) (_mm_shuffle_ps(x,y,_MM_SHUFFLE(3,2,1,0)))
+#define V4SF_UNPACK_LO(x,y) \
+ (_mm_movelh_ps(x, y))
-#define VLIT4 _mm_set_ps
+#define V4SF_BLEND(x, y) \
+ (_mm_shuffle_ps(x, y, _MM_SHUFFLE(3,2,1,0)))
-#define VDUPRE(r) (_mm_shuffle_ps(r,r,_MM_SHUFFLE(2,2,0,0)))
-#define VDUPIM(r) (_mm_shuffle_ps(r,r,_MM_SHUFFLE(3,3,1,1)))
+#define V4SF_DUPLICATE_RE(r) \
+ (_mm_shuffle_ps(r, r, _MM_SHUFFLE(2,2,0,0)))
-#define FFTS_MALLOC(d,a) (_mm_malloc(d,a))
-#define FFTS_FREE(d) (_mm_free(d))
+#define V4SF_DUPLICATE_IM(r) \
+ (_mm_shuffle_ps(r, r, _MM_SHUFFLE(3,3,1,1)))
-__INLINE V IMULI(int inv, V a) {
- if(inv) return VSWAPPAIRS(VXOR(a, VLIT4(0.0f, -0.0f, 0.0f, -0.0f)));
- else return VSWAPPAIRS(VXOR(a, VLIT4(-0.0f, 0.0f, -0.0f, 0.0f)));
+static FFTS_ALWAYS_INLINE V4SF
+V4SF_IMULI(int inv, V4SF a)
+{
+ if (inv) {
+ return V4SF_SWAP_PAIRS(V4SF_XOR(a, V4SF_LIT4(0.0f, -0.0f, 0.0f, -0.0f)));
+ } else {
+ return V4SF_SWAP_PAIRS(V4SF_XOR(a, V4SF_LIT4(-0.0f, 0.0f, -0.0f, 0.0f)));
+ }
}
-
-__INLINE V IMUL(V d, V re, V im) {
- re = VMUL(re, d);
- im = VMUL(im, VSWAPPAIRS(d));
- return VSUB(re, im);
+static FFTS_ALWAYS_INLINE V4SF
+V4SF_IMUL(V4SF d, V4SF re, V4SF im)
+{
+ re = V4SF_MUL(re, d);
+ im = V4SF_MUL(im, V4SF_SWAP_PAIRS(d));
+ return V4SF_SUB(re, im);
}
-__INLINE V IMULJ(V d, V re, V im) {
- re = VMUL(re, d);
- im = VMUL(im, VSWAPPAIRS(d));
- return VADD(re, im);
+static FFTS_ALWAYS_INLINE V4SF
+V4SF_IMULJ(V4SF d, V4SF re, V4SF im)
+{
+ re = V4SF_MUL(re, d);
+ im = V4SF_MUL(im, V4SF_SWAP_PAIRS(d));
+ return V4SF_ADD(re, im);
}
-#endif
+#endif /* FFTS_MACROS_SSE_H */
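Besides the renames, the one functional change in this file is V4SF_UNPACK_LO, which now uses _mm_movelh_ps instead of the equivalent _mm_shuffle_ps: both produce (x0, x1, y0, y1), but MOVLHPS is a single instruction with no immediate operand. A quick standalone check (not part of the patch):

/* Verifies the old shuffle and the new MOVLHPS idiom agree.
 * Compile with SSE enabled (e.g. -msse). */
#include <stdio.h>
#include <xmmintrin.h>

int main(void)
{
    __m128 x = _mm_set_ps(3.0f, 2.0f, 1.0f, 0.0f); /* lanes 0..3 = 0,1,2,3 */
    __m128 y = _mm_set_ps(7.0f, 6.0f, 5.0f, 4.0f); /* lanes 0..3 = 4,5,6,7 */

    float a[4], b[4];
    _mm_storeu_ps(a, _mm_movelh_ps(x, y));
    _mm_storeu_ps(b, _mm_shuffle_ps(x, y, _MM_SHUFFLE(1, 0, 1, 0)));

    /* Both lines print: 0 1 4 5 */
    printf("%g %g %g %g\n", a[0], a[1], a[2], a[3]);
    printf("%g %g %g %g\n", b[0], b[1], b[2], b[3]);
    return 0;
}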
diff --git a/lib/ffts/src/macros.h b/lib/ffts/src/macros.h
index d304cec..b755775 100644
--- a/lib/ffts/src/macros.h
+++ b/lib/ffts/src/macros.h
@@ -1,161 +1,204 @@
/*
-
- This file is part of FFTS -- The Fastest Fourier Transform in the South
-
- Copyright (c) 2013, Michael J. Cree <mcree@orcon.net.nz>
- Copyright (c) 2012, 2013, Anthony M. Blake <amb@anthonix.com>
-
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions are met:
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in the
- documentation and/or other materials provided with the distribution.
- * Neither the name of the organization nor the
- names of its contributors may be used to endorse or promote products
- derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
- ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- DISCLAIMED. IN NO EVENT SHALL ANTHONY M. BLAKE BE LIABLE FOR ANY
- DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+This file is part of FFTS -- The Fastest Fourier Transform in the South
+
+Copyright (c) 2013, Michael J. Cree <mcree@orcon.net.nz>
+Copyright (c) 2012, 2013, Anthony M. Blake <amb@anthonix.com>
+
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+* Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+* Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in the
+documentation and/or other materials provided with the distribution.
+* Neither the name of the organization nor the
+names of its contributors may be used to endorse or promote products
+derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL ANTHONY M. BLAKE BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef __MACROS_H__
-#define __MACROS_H__
+#ifndef FFTS_MACROS_H
+#define FFTS_MACROS_H
+
+#if defined (_MSC_VER) && (_MSC_VER >= 1020)
+#pragma once
+#endif
#ifdef HAVE_NEON
#include "macros-neon.h"
+#elif HAVE_SSE
+#include "macros-sse.h"
+#elif __powerpc__
+#include "macros-altivec.h"
#else
-#ifdef __alpha__
#include "macros-alpha.h"
-#else
-#ifdef __powerpc__
-#include "macros-altivec.h"
-#endif
#endif
-#endif
+static FFTS_INLINE void
+V4SF_TX2(V4SF *a, V4SF *b)
+{
+ V4SF t0 = V4SF_UNPACK_LO(*a, *b);
+ V4SF t1 = V4SF_UNPACK_HI(*a, *b);
+ *a = t0;
+ *b = t1;
+}
+static FFTS_INLINE void
+V4SF_K_N(int inv,
+ V4SF re,
+ V4SF im,
+ V4SF *r0,
+ V4SF *r1,
+ V4SF *r2,
+ V4SF *r3)
+{
+ V4SF uk, uk2, zk_p, zk_n, zk, zk_d;
-#ifdef HAVE_VFP
-#include "macros-alpha.h"
-#endif
-#ifdef HAVE_SSE
- #include "macros-sse.h"
-#endif
+ uk = *r0;
+ uk2 = *r1;
-static inline void TX2(V *a, V *b)
-{
- V TX2_t0 = VUNPACKLO(*a, *b);
- V TX2_t1 = VUNPACKHI(*a, *b);
- *a = TX2_t0; *b = TX2_t1;
+ zk_p = V4SF_IMUL(*r2, re, im);
+ zk_n = V4SF_IMULJ(*r3, re, im);
+
+ zk = V4SF_ADD(zk_p, zk_n);
+ zk_d = V4SF_IMULI(inv, V4SF_SUB(zk_p, zk_n));
+
+ *r2 = V4SF_SUB(uk, zk);
+ *r0 = V4SF_ADD(uk, zk);
+ *r3 = V4SF_ADD(uk2, zk_d);
+ *r1 = V4SF_SUB(uk2, zk_d);
}
-static inline void K_N(int inv, V re, V im, V *r0, V *r1, V *r2, V *r3)
+static FFTS_INLINE void
+V4SF_L_2_4(int inv,
+ const float *FFTS_RESTRICT i0,
+ const float *FFTS_RESTRICT i1,
+ const float *FFTS_RESTRICT i2,
+ const float *FFTS_RESTRICT i3,
+ V4SF *r0,
+ V4SF *r1,
+ V4SF *r2,
+ V4SF *r3)
{
- V uk, uk2, zk_p, zk_n, zk, zk_d;
- uk = *r0; uk2 = *r1;
- zk_p = IMUL(*r2, re, im);
- zk_n = IMULJ(*r3, re, im);
-
- zk = VADD(zk_p, zk_n);
- zk_d = IMULI(inv, VSUB(zk_p, zk_n));
-
- *r2 = VSUB(uk, zk);
- *r0 = VADD(uk, zk);
- *r3 = VADD(uk2, zk_d);
- *r1 = VSUB(uk2, zk_d);
-}
+ V4SF t0, t1, t2, t3, t4, t5, t6, t7;
+ t0 = V4SF_LD(i0);
+ t1 = V4SF_LD(i1);
+ t2 = V4SF_LD(i2);
+ t3 = V4SF_LD(i3);
-static inline void S_4(V r0, V r1, V r2, V r3,
- data_t * restrict o0, data_t * restrict o1,
- data_t * restrict o2, data_t * restrict o3)
-{
- VST(o0, r0); VST(o1, r1); VST(o2, r2); VST(o3, r3);
-}
+ t4 = V4SF_ADD(t0, t1);
+ t5 = V4SF_SUB(t0, t1);
+ t6 = V4SF_ADD(t2, t3);
+ t7 = V4SF_SUB(t2, t3);
+ *r0 = V4SF_UNPACK_LO(t4, t5);
+ *r1 = V4SF_UNPACK_LO(t6, t7);
-static inline void L_2_4(int inv,
- const data_t * restrict i0, const data_t * restrict i1,
- const data_t * restrict i2, const data_t * restrict i3,
- V *r0, V *r1, V *r2, V *r3)
-{
- V t0, t1, t2, t3, t4, t5, t6, t7;
-
- t0 = VLD(i0); t1 = VLD(i1); t2 = VLD(i2); t3 = VLD(i3);
- t4 = VADD(t0, t1);
- t5 = VSUB(t0, t1);
- t6 = VADD(t2, t3);
- t7 = VSUB(t2, t3);
- *r0 = VUNPACKLO(t4, t5);
- *r1 = VUNPACKLO(t6, t7);
- t5 = IMULI(inv, t5);
- t0 = VADD(t6, t4);
- t2 = VSUB(t6, t4);
- t1 = VSUB(t7, t5);
- t3 = VADD(t7, t5);
- *r3 = VUNPACKHI(t0, t1);
- *r2 = VUNPACKHI(t2, t3);
-}
+ t5 = V4SF_IMULI(inv, t5);
+ t0 = V4SF_ADD(t6, t4);
+ t2 = V4SF_SUB(t6, t4);
+ t1 = V4SF_SUB(t7, t5);
+ t3 = V4SF_ADD(t7, t5);
-static inline void L_4_4(int inv,
- const data_t * restrict i0, const data_t * restrict i1,
- const data_t * restrict i2, const data_t * restrict i3,
- V *r0, V *r1, V *r2, V *r3)
-{
- V t0, t1, t2, t3, t4, t5, t6, t7;
-
- t0 = VLD(i0); t1 = VLD(i1); t2 = VLD(i2); t3 = VLD(i3);
- t4 = VADD(t0, t1);
- t5 = VSUB(t0, t1);
- t6 = VADD(t2, t3);
- t7 = IMULI(inv, VSUB(t2, t3));
- t0 = VADD(t4, t6);
- t2 = VSUB(t4, t6);
- t1 = VSUB(t5, t7);
- t3 = VADD(t5, t7);
- TX2(&t0, &t1);
- TX2(&t2, &t3);
- *r0 = t0; *r2 = t1; *r1 = t2; *r3 = t3;
+ *r3 = V4SF_UNPACK_HI(t0, t1);
+ *r2 = V4SF_UNPACK_HI(t2, t3);
}
+static FFTS_INLINE void
+V4SF_L_4_4(int inv,
+ const float *FFTS_RESTRICT i0,
+ const float *FFTS_RESTRICT i1,
+ const float *FFTS_RESTRICT i2,
+ const float *FFTS_RESTRICT i3,
+ V4SF *r0,
+ V4SF *r1,
+ V4SF *r2,
+ V4SF *r3)
+{
+ V4SF t0, t1, t2, t3, t4, t5, t6, t7;
+
+ t0 = V4SF_LD(i0);
+ t1 = V4SF_LD(i1);
+ t2 = V4SF_LD(i2);
+ t3 = V4SF_LD(i3);
+ t4 = V4SF_ADD(t0, t1);
+ t5 = V4SF_SUB(t0, t1);
+ t6 = V4SF_ADD(t2, t3);
+
+ t7 = V4SF_IMULI(inv, V4SF_SUB(t2, t3));
+
+ t0 = V4SF_ADD(t4, t6);
+ t2 = V4SF_SUB(t4, t6);
+ t1 = V4SF_SUB(t5, t7);
+ t3 = V4SF_ADD(t5, t7);
+
+ V4SF_TX2(&t0, &t1);
+ V4SF_TX2(&t2, &t3);
+
+ *r0 = t0;
+ *r2 = t1;
+ *r1 = t2;
+ *r3 = t3;
+}
-static inline void L_4_2(int inv,
- const data_t * restrict i0, const data_t * restrict i1,
- const data_t * restrict i2, const data_t * restrict i3,
- V *r0, V *r1, V *r2, V *r3)
+static FFTS_INLINE void
+V4SF_L_4_2(int inv,
+ const float *FFTS_RESTRICT i0,
+ const float *FFTS_RESTRICT i1,
+ const float *FFTS_RESTRICT i2,
+ const float *FFTS_RESTRICT i3,
+ V4SF *r0,
+ V4SF *r1,
+ V4SF *r2,
+ V4SF *r3)
{
- V t0, t1, t2, t3, t4, t5, t6, t7;
-
- t0 = VLD(i0); t1 = VLD(i1); t6 = VLD(i2); t7 = VLD(i3);
- t2 = VBLEND(t6, t7);
- t3 = VBLEND(t7, t6);
- t4 = VADD(t0, t1);
- t5 = VSUB(t0, t1);
- t6 = VADD(t2, t3);
- t7 = VSUB(t2, t3);
- *r2 = VUNPACKHI(t4, t5);
- *r3 = VUNPACKHI(t6, t7);
- t7 = IMULI(inv, t7);
- t0 = VADD(t4, t6);
- t2 = VSUB(t4, t6);
- t1 = VSUB(t5, t7);
- t3 = VADD(t5, t7);
- *r0 = VUNPACKLO(t0, t1);
- *r1 = VUNPACKLO(t2, t3);
+ V4SF t0, t1, t2, t3, t4, t5, t6, t7;
+
+ t0 = V4SF_LD(i0);
+ t1 = V4SF_LD(i1);
+ t6 = V4SF_LD(i2);
+ t7 = V4SF_LD(i3);
+
+ t2 = V4SF_BLEND(t6, t7);
+ t3 = V4SF_BLEND(t7, t6);
+
+ t4 = V4SF_ADD(t0, t1);
+ t5 = V4SF_SUB(t0, t1);
+ t6 = V4SF_ADD(t2, t3);
+ t7 = V4SF_SUB(t2, t3);
+
+ *r2 = V4SF_UNPACK_HI(t4, t5);
+ *r3 = V4SF_UNPACK_HI(t6, t7);
+
+ t7 = V4SF_IMULI(inv, t7);
+
+ t0 = V4SF_ADD(t4, t6);
+ t2 = V4SF_SUB(t4, t6);
+ t1 = V4SF_SUB(t5, t7);
+ t3 = V4SF_ADD(t5, t7);
+
+ *r0 = V4SF_UNPACK_LO(t0, t1);
+ *r1 = V4SF_UNPACK_LO(t2, t3);
}
-#endif
+
+#define V4SF_S_4(r0, r1, r2, r3, o0, o1, o2, o3) \
+ V4SF_ST(o0, r0); V4SF_ST(o1, r1); V4SF_ST(o2, r2); V4SF_ST(o3, r3);
+
+#endif /* FFTS_MACROS_H */
\ No newline at end of file
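V4SF_K_N is the radix-4 butterfly at the heart of these kernels: it forms the sum and the +/-i-rotated difference of the two twiddled inputs and combines them with the untwiddled pair. The sketch below restates it for a single complex element using C99 complex floats; whether w or conj(w) lands on r2 versus r3 depends on the sign convention baked into the twiddle LUT, so that pairing is an assumed reading of IMUL versus IMULJ, not something the diff pins down.

/* Reference sketch of the butterfly V4SF_K_N implements, for one
 * complex element (the vector code processes two at once). */
#include <complex.h>
#include <stdio.h>

static void k_n_ref(int inv, float complex w,
                    float complex *r0, float complex *r1,
                    float complex *r2, float complex *r3)
{
    float complex uk  = *r0;
    float complex uk2 = *r1;

    float complex zk_p = *r2 * w;          /* V4SF_IMUL  (assumed)  */
    float complex zk_n = *r3 * conjf(w);   /* V4SF_IMULJ (assumed)  */

    float complex zk   = zk_p + zk_n;
    float complex zk_d = (inv ? -I : I) * (zk_p - zk_n); /* V4SF_IMULI */

    *r0 = uk  + zk;
    *r2 = uk  - zk;
    *r1 = uk2 - zk_d;
    *r3 = uk2 + zk_d;
}

int main(void)
{
    float complex r0 = 1, r1 = 2, r2 = 3, r3 = 4;
    k_n_ref(0, 0.6f + 0.8f * I, &r0, &r1, &r2, &r3);
    printf("r0 = %g%+gi\n", crealf(r0), cimagf(r0));
    return 0;
}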
diff --git a/lib/ffts/src/neon.h b/lib/ffts/src/neon.h
index f3132c2..f719159 100644
--- a/lib/ffts/src/neon.h
+++ b/lib/ffts/src/neon.h
@@ -1,38 +1,38 @@
/*
-
- This file is part of FFTS -- The Fastest Fourier Transform in the South
-
- Copyright (c) 2012, Anthony M. Blake <amb@anthonix.com>
- Copyright (c) 2012, The University of Waikato
-
- All rights reserved.
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions are met:
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in the
- documentation and/or other materials provided with the distribution.
- * Neither the name of the organization nor the
- names of its contributors may be used to endorse or promote products
- derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
- ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- DISCLAIMED. IN NO EVENT SHALL ANTHONY M. BLAKE BE LIABLE FOR ANY
- DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+This file is part of FFTS -- The Fastest Fourier Transform in the South
+
+Copyright (c) 2012, Anthony M. Blake <amb@anthonix.com>
+Copyright (c) 2012, The University of Waikato
+
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+* Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+* Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in the
+documentation and/or other materials provided with the distribution.
+* Neither the name of the organization nor the
+names of its contributors may be used to endorse or promote products
+derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL ANTHONY M. BLAKE BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef __NEON_H__
-#define __NEON_H__
+#ifndef FFTS_NEON_H
+#define FFTS_NEON_H
#include "ffts.h"
@@ -45,21 +45,19 @@ void neon_eo();
void neon_oe();
void neon_end();
-void neon_transpose(uint64_t *in, uint64_t *out, int w, int h);
-void neon_transpose_to_buf(uint64_t *in, uint64_t *out, int w);
-
-//typedef struct _ffts_plan_t ffts_plan_t;
+void neon_transpose4(uint64_t *in, uint64_t *out, int w, int h);
+void neon_transpose8(uint64_t *in, uint64_t *out, int w, int h);
-void neon_static_e_f(ffts_plan_t * , const void * , void * );
-void neon_static_o_f(ffts_plan_t * , const void * , void * );
-void neon_static_x4_f(float *, size_t, float *);
-void neon_static_x8_f(float *, size_t, float *);
-void neon_static_x8_t_f(float *, size_t, float *);
+void neon_static_e_f(ffts_plan_t*, const void*, void*);
+void neon_static_o_f(ffts_plan_t*, const void*, void*);
+void neon_static_x4_f(float*, const float*);
+void neon_static_x8_f(float*, size_t, const float*);
+void neon_static_x8_t_f(float*, size_t, const float*);
-void neon_static_e_i(ffts_plan_t * , const void * , void * );
-void neon_static_o_i(ffts_plan_t * , const void * , void * );
-void neon_static_x4_i(float *, size_t, float *);
-void neon_static_x8_i(float *, size_t, float *);
-void neon_static_x8_t_i(float *, size_t, float *);
+void neon_static_e_i(ffts_plan_t*, const void*, void*);
+void neon_static_o_i(ffts_plan_t*, const void*, void*);
+void neon_static_x4_i(float*, const float*);
+void neon_static_x8_i(float*, size_t, const float*);
+void neon_static_x8_t_i(float*, size_t, const float*);
-#endif
+#endif /* FFTS_NEON_H */
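Beyond const-correctness, two of these prototype changes alter the ABI: neon_static_x4_f and neon_static_x4_i lose their size_t argument, and the transpose helpers are renamed to neon_transpose4/neon_transpose8 with an added height parameter. Callers and the assembly definitions have to change in lockstep; presumably the rest of this commit does exactly that.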
diff --git a/lib/ffts/src/neon.s b/lib/ffts/src/neon.s
index 6995066..7486b63 100644
--- a/lib/ffts/src/neon.s
+++ b/lib/ffts/src/neon.s
@@ -1,326 +1,323 @@
/*
-
- This file is part of FFTS -- The Fastest Fourier Transform in the South
-
- Copyright (c) 2012, Anthony M. Blake <amb@anthonix.com>
- Copyright (c) 2012, The University of Waikato
-
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions are met:
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in the
- documentation and/or other materials provided with the distribution.
- * Neither the name of the organization nor the
- names of its contributors may be used to endorse or promote products
- derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
- ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- DISCLAIMED. IN NO EVENT SHALL ANTHONY M. BLAKE BE LIABLE FOR ANY
- DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+This file is part of FFTS -- The Fastest Fourier Transform in the South
+
+Copyright (c) 2016, Jukka Ojanen <jukka.ojanen@kolumbus.fi>
+Copyright (c) 2012, Anthony M. Blake <amb@anthonix.com>
+Copyright (c) 2012, The University of Waikato
+
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+* Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+* Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in the
+documentation and/or other materials provided with the distribution.
+* Neither the name of the organization nor the
+names of its contributors may be used to endorse or promote products
+derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL ANTHONY M. BLAKE BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
+ .fpu neon
- .align 4
+ .align 4
#ifdef __APPLE__
- .globl _neon_x4
+ .globl _neon_x4
_neon_x4:
#else
- .globl neon_x4
+ .globl neon_x4
neon_x4:
#endif
-@ add r3, r0, #0
-
- vld1.32 {q8,q9}, [r0, :128]
- add r4, r0, r1, lsl #1
- vld1.32 {q10,q11}, [r4, :128]
- add r5, r0, r1, lsl #2
- vld1.32 {q12,q13}, [r5, :128]
- add r6, r4, r1, lsl #2
- vld1.32 {q14,q15}, [r6, :128]
- vld1.32 {q2,q3}, [r2, :128]
-
- vmul.f32 q0, q13, q3
- vmul.f32 q5, q12, q2
- vmul.f32 q1, q14, q2
- vmul.f32 q4, q14, q3
- vmul.f32 q14, q12, q3
- vmul.f32 q13, q13, q2
- vmul.f32 q12, q15, q3
- vmul.f32 q2, q15, q2
- vsub.f32 q0, q5, q0
- vadd.f32 q13, q13, q14
- vadd.f32 q12, q12, q1
- vsub.f32 q1, q2, q4
- vadd.f32 q15, q0, q12
- vsub.f32 q12, q0, q12
- vadd.f32 q14, q13, q1
- vsub.f32 q13, q13, q1
- vadd.f32 q0, q8, q15
- vadd.f32 q1, q9, q14
- vsub.f32 q2, q10, q13 @
- vsub.f32 q4, q8, q15
- vadd.f32 q3, q11, q12 @
- vst1.32 {q0,q1}, [r0, :128]
- vsub.f32 q5, q9, q14
- vadd.f32 q6, q10, q13 @
- vsub.f32 q7, q11, q12 @
- vst1.32 {q2,q3}, [r4, :128]
- vst1.32 {q4,q5}, [r5, :128]
- vst1.32 {q6,q7}, [r6, :128]
- bx lr
-
- .align 4
+ vld1.32 {q8, q9}, [r0, :128]
+ add r4, r0, r1, lsl #1
+ vld1.32 {q10, q11}, [r4, :128]
+ add r5, r0, r1, lsl #2
+ vld1.32 {q12, q13}, [r5, :128]
+ add r6, r4, r1, lsl #2
+ vld1.32 {q14, q15}, [r6, :128]
+ vld1.32 {q2, q3}, [r2, :128]
+
+ vmul.f32 q0, q13, q3
+ vmul.f32 q5, q12, q2
+ vmul.f32 q1, q14, q2
+ vmul.f32 q4, q14, q3
+ vmul.f32 q14, q12, q3
+ vmul.f32 q13, q13, q2
+ vmul.f32 q12, q15, q3
+ vmul.f32 q2, q15, q2
+ vsub.f32 q0, q5, q0
+ vadd.f32 q13, q13, q14
+ vadd.f32 q12, q12, q1
+ vsub.f32 q1, q2, q4
+ vadd.f32 q15, q0, q12
+ vsub.f32 q12, q0, q12
+ vadd.f32 q14, q13, q1
+ vsub.f32 q13, q13, q1
+ vadd.f32 q0, q8, q15
+ vadd.f32 q1, q9, q14
+ vsub.f32 q2, q10, q13
+ vsub.f32 q4, q8, q15
+ vadd.f32 q3, q11, q12
+ vst1.32 {q0, q1}, [r0, :128]
+ vsub.f32 q5, q9, q14
+ vadd.f32 q6, q10, q13
+ vsub.f32 q7, q11, q12
+ vst1.32 {q2, q3}, [r4, :128]
+ vst1.32 {q4, q5}, [r5, :128]
+ vst1.32 {q6, q7}, [r6, :128]
+ bx lr
+
+ .align 4
#ifdef __APPLE__
- .globl _neon_x8
+ .globl _neon_x8
_neon_x8:
#else
- .globl neon_x8
+ .globl neon_x8
neon_x8:
#endif
- mov r11, #0
- add r3, r0, #0 @ data0
- add r5, r0, r1, lsl #1 @ data2
- add r4, r0, r1 @ data1
- add r7, r5, r1, lsl #1 @ data4
- add r6, r5, r1 @ data3
- add r9, r7, r1, lsl #1 @ data6
- add r8, r7, r1 @ data5
- add r10, r9, r1 @ data7
- add r12, r2, #0 @ LUT
-
- sub r11, r11, r1, lsr #5
-neon_x8_loop:
- vld1.32 {q2,q3}, [r12, :128]!
- vld1.32 {q14,q15}, [r6, :128]
- vld1.32 {q10,q11}, [r5, :128]
- adds r11, r11, #1
- vmul.f32 q12, q15, q2
- vmul.f32 q8, q14, q3
- vmul.f32 q13, q14, q2
- vmul.f32 q9, q10, q3
- vmul.f32 q1, q10, q2
- vmul.f32 q0, q11, q2
- vmul.f32 q14, q11, q3
- vmul.f32 q15, q15, q3
- vld1.32 {q2,q3}, [r12, :128]!
- vsub.f32 q10, q12, q8
- vadd.f32 q11, q0, q9
- vadd.f32 q8, q15, q13
- vld1.32 {q12,q13}, [r4, :128]
- vsub.f32 q9, q1, q14
- vsub.f32 q15, q11, q10
- vsub.f32 q14, q9, q8
- vsub.f32 q4, q12, q15 @
- vadd.f32 q6, q12, q15 @
- vadd.f32 q5, q13, q14 @
- vsub.f32 q7, q13, q14 @
- vld1.32 {q14,q15}, [r9, :128]
- vld1.32 {q12,q13}, [r7, :128]
- vmul.f32 q1, q14, q2
- vmul.f32 q0, q14, q3
- vst1.32 {q4,q5}, [r4, :128]
- vmul.f32 q14, q15, q3
- vmul.f32 q4, q15, q2
- vadd.f32 q15, q9, q8
- vst1.32 {q6,q7}, [r6, :128]
- vmul.f32 q8, q12, q3
- vmul.f32 q5, q13, q3
- vmul.f32 q12, q12, q2
- vmul.f32 q9, q13, q2
- vadd.f32 q14, q14, q1
- vsub.f32 q13, q4, q0
- vadd.f32 q0, q9, q8
- vld1.32 {q8,q9}, [r3, :128]
- vadd.f32 q1, q11, q10
- vsub.f32 q12, q12, q5
- vadd.f32 q11, q8, q15
- vsub.f32 q8, q8, q15
- vadd.f32 q2, q12, q14
- vsub.f32 q10, q0, q13
- vadd.f32 q15, q0, q13
- vadd.f32 q13, q9, q1
- vsub.f32 q9, q9, q1
- vsub.f32 q12, q12, q14
- vadd.f32 q0, q11, q2
- vadd.f32 q1, q13, q15
- vsub.f32 q4, q11, q2
- vsub.f32 q2, q8, q10 @
- vadd.f32 q3, q9, q12 @
- vst1.32 {q0,q1}, [r3, :128]!
- vsub.f32 q5, q13, q15
- vld1.32 {q14,q15}, [r10, :128]
- vsub.f32 q7, q9, q12 @
- vld1.32 {q12,q13}, [r8, :128]
- vst1.32 {q2,q3}, [r5, :128]!
- vld1.32 {q2,q3}, [r12, :128]!
- vadd.f32 q6, q8, q10 @
- vmul.f32 q8, q14, q2
- vst1.32 {q4,q5}, [r7, :128]!
- vmul.f32 q10, q15, q3
- vmul.f32 q9, q13, q3
- vmul.f32 q11, q12, q2
- vmul.f32 q14, q14, q3
- vst1.32 {q6,q7}, [r9, :128]!
- vmul.f32 q15, q15, q2
- vmul.f32 q12, q12, q3
- vmul.f32 q13, q13, q2
- vadd.f32 q10, q10, q8
- vsub.f32 q11, q11, q9
- vld1.32 {q8,q9}, [r4, :128]
- vsub.f32 q14, q15, q14
- vadd.f32 q15, q13, q12
- vadd.f32 q13, q11, q10
- vadd.f32 q12, q15, q14
- vsub.f32 q15, q15, q14
- vsub.f32 q14, q11, q10
- vld1.32 {q10,q11}, [r6, :128]
- vadd.f32 q0, q8, q13
- vadd.f32 q1, q9, q12
- vsub.f32 q2, q10, q15 @
- vadd.f32 q3, q11, q14 @
- vsub.f32 q4, q8, q13
- vst1.32 {q0,q1}, [r4, :128]!
- vsub.f32 q5, q9, q12
- vadd.f32 q6, q10, q15 @
- vst1.32 {q2,q3}, [r6, :128]!
- vsub.f32 q7, q11, q14 @
- vst1.32 {q4,q5}, [r8, :128]!
- vst1.32 {q6,q7}, [r10, :128]!
- bne neon_x8_loop
-
- bx lr
-
- .align 4
+ mov r11, #0
+ add r3, r0, #0 @ data0
+ add r5, r0, r1, lsl #1 @ data2
+ add r4, r0, r1 @ data1
+ add r7, r5, r1, lsl #1 @ data4
+ add r6, r5, r1 @ data3
+ add r9, r7, r1, lsl #1 @ data6
+ add r8, r7, r1 @ data5
+ add r10, r9, r1 @ data7
+ add r12, r2, #0 @ LUT
+
+ sub r11, r11, r1, lsr #5
+1:
+ vld1.32 {q2, q3}, [r12, :128]!
+ vld1.32 {q14, q15}, [r6, :128]
+ vld1.32 {q10, q11}, [r5, :128]
+ adds r11, r11, #1
+ vmul.f32 q12, q15, q2
+ vmul.f32 q8, q14, q3
+ vmul.f32 q13, q14, q2
+ vmul.f32 q9, q10, q3
+ vmul.f32 q1, q10, q2
+ vmul.f32 q0, q11, q2
+ vmul.f32 q14, q11, q3
+ vmul.f32 q15, q15, q3
+ vld1.32 {q2, q3}, [r12, :128]!
+ vsub.f32 q10, q12, q8
+ vadd.f32 q11, q0, q9
+ vadd.f32 q8, q15, q13
+ vld1.32 {q12, q13}, [r4, :128]
+ vsub.f32 q9, q1, q14
+ vsub.f32 q15, q11, q10
+ vsub.f32 q14, q9, q8
+ vsub.f32 q4, q12, q15
+ vadd.f32 q6, q12, q15
+ vadd.f32 q5, q13, q14
+ vsub.f32 q7, q13, q14
+ vld1.32 {q14, q15}, [r9, :128]
+ vld1.32 {q12, q13}, [r7, :128]
+ vmul.f32 q1, q14, q2
+ vmul.f32 q0, q14, q3
+ vst1.32 {q4, q5}, [r4, :128]
+ vmul.f32 q14, q15, q3
+ vmul.f32 q4, q15, q2
+ vadd.f32 q15, q9, q8
+ vst1.32 {q6, q7}, [r6, :128]
+ vmul.f32 q8, q12, q3
+ vmul.f32 q5, q13, q3
+ vmul.f32 q12, q12, q2
+ vmul.f32 q9, q13, q2
+ vadd.f32 q14, q14, q1
+ vsub.f32 q13, q4, q0
+ vadd.f32 q0, q9, q8
+ vld1.32 {q8, q9}, [r3, :128]
+ vadd.f32 q1, q11, q10
+ vsub.f32 q12, q12, q5
+ vadd.f32 q11, q8, q15
+ vsub.f32 q8, q8, q15
+ vadd.f32 q2, q12, q14
+ vsub.f32 q10, q0, q13
+ vadd.f32 q15, q0, q13
+ vadd.f32 q13, q9, q1
+ vsub.f32 q9, q9, q1
+ vsub.f32 q12, q12, q14
+ vadd.f32 q0, q11, q2
+ vadd.f32 q1, q13, q15
+ vsub.f32 q4, q11, q2
+ vsub.f32 q2, q8, q10
+ vadd.f32 q3, q9, q12
+ vst1.32 {q0, q1}, [r3, :128]!
+ vsub.f32 q5, q13, q15
+ vld1.32 {q14, q15}, [r10, :128]
+ vsub.f32 q7, q9, q12
+ vld1.32 {q12, q13}, [r8, :128]
+ vst1.32 {q2, q3}, [r5, :128]!
+ vld1.32 {q2, q3}, [r12, :128]!
+ vadd.f32 q6, q8, q10
+ vmul.f32 q8, q14, q2
+ vst1.32 {q4, q5}, [r7, :128]!
+ vmul.f32 q10, q15, q3
+ vmul.f32 q9, q13, q3
+ vmul.f32 q11, q12, q2
+ vmul.f32 q14, q14, q3
+ vst1.32 {q6, q7}, [r9, :128]!
+ vmul.f32 q15, q15, q2
+ vmul.f32 q12, q12, q3
+ vmul.f32 q13, q13, q2
+ vadd.f32 q10, q10, q8
+ vsub.f32 q11, q11, q9
+ vld1.32 {q8, q9}, [r4, :128]
+ vsub.f32 q14, q15, q14
+ vadd.f32 q15, q13, q12
+ vadd.f32 q13, q11, q10
+ vadd.f32 q12, q15, q14
+ vsub.f32 q15, q15, q14
+ vsub.f32 q14, q11, q10
+ vld1.32 {q10, q11}, [r6, :128]
+ vadd.f32 q0, q8, q13
+ vadd.f32 q1, q9, q12
+ vsub.f32 q2, q10, q15
+ vadd.f32 q3, q11, q14
+ vsub.f32 q4, q8, q13
+ vst1.32 {q0, q1}, [r4, :128]!
+ vsub.f32 q5, q9, q12
+ vadd.f32 q6, q10, q15
+ vst1.32 {q2, q3}, [r6, :128]!
+ vsub.f32 q7, q11, q14
+ vst1.32 {q4, q5}, [r8, :128]!
+ vst1.32 {q6, q7}, [r10, :128]!
+ bne 1b
+ bx lr
+
+ .align 4
#ifdef __APPLE__
- .globl _neon_x8_t
+ .globl _neon_x8_t
_neon_x8_t:
#else
- .globl neon_x8_t
+ .globl neon_x8_t
neon_x8_t:
#endif
- mov r11, #0
- add r3, r0, #0 @ data0
- add r5, r0, r1, lsl #1 @ data2
- add r4, r0, r1 @ data1
- add r7, r5, r1, lsl #1 @ data4
- add r6, r5, r1 @ data3
- add r9, r7, r1, lsl #1 @ data6
- add r8, r7, r1 @ data5
- add r10, r9, r1 @ data7
- add r12, r2, #0 @ LUT
-
- sub r11, r11, r1, lsr #5
-neon_x8_t_loop:
- vld1.32 {q2,q3}, [r12, :128]!
- vld1.32 {q14,q15}, [r6, :128]
- vld1.32 {q10,q11}, [r5, :128]
- adds r11, r11, #1
- vmul.f32 q12, q15, q2
- vmul.f32 q8, q14, q3
- vmul.f32 q13, q14, q2
- vmul.f32 q9, q10, q3
- vmul.f32 q1, q10, q2
- vmul.f32 q0, q11, q2
- vmul.f32 q14, q11, q3
- vmul.f32 q15, q15, q3
- vld1.32 {q2,q3}, [r12, :128]!
- vsub.f32 q10, q12, q8
- vadd.f32 q11, q0, q9
- vadd.f32 q8, q15, q13
- vld1.32 {q12,q13}, [r4, :128]
- vsub.f32 q9, q1, q14
- vsub.f32 q15, q11, q10
- vsub.f32 q14, q9, q8
- vsub.f32 q4, q12, q15 @
- vadd.f32 q6, q12, q15 @
- vadd.f32 q5, q13, q14 @
- vsub.f32 q7, q13, q14 @
- vld1.32 {q14,q15}, [r9, :128]
- vld1.32 {q12,q13}, [r7, :128]
- vmul.f32 q1, q14, q2
- vmul.f32 q0, q14, q3
- vst1.32 {q4,q5}, [r4, :128]
- vmul.f32 q14, q15, q3
- vmul.f32 q4, q15, q2
- vadd.f32 q15, q9, q8
- vst1.32 {q6,q7}, [r6, :128]
- vmul.f32 q8, q12, q3
- vmul.f32 q5, q13, q3
- vmul.f32 q12, q12, q2
- vmul.f32 q9, q13, q2
- vadd.f32 q14, q14, q1
- vsub.f32 q13, q4, q0
- vadd.f32 q0, q9, q8
- vld1.32 {q8,q9}, [r3, :128]
- vadd.f32 q1, q11, q10
- vsub.f32 q12, q12, q5
- vadd.f32 q11, q8, q15
- vsub.f32 q8, q8, q15
- vadd.f32 q2, q12, q14
- vsub.f32 q10, q0, q13
- vadd.f32 q15, q0, q13
- vadd.f32 q13, q9, q1
- vsub.f32 q9, q9, q1
- vsub.f32 q12, q12, q14
- vadd.f32 q0, q11, q2
- vadd.f32 q1, q13, q15
- vsub.f32 q4, q11, q2
- vsub.f32 q2, q8, q10 @
- vadd.f32 q3, q9, q12 @
- vst2.32 {q0,q1}, [r3, :128]!
- vsub.f32 q5, q13, q15
- vld1.32 {q14,q15}, [r10, :128]
- vsub.f32 q7, q9, q12 @
- vld1.32 {q12,q13}, [r8, :128]
- vst2.32 {q2,q3}, [r5, :128]!
- vld1.32 {q2,q3}, [r12, :128]!
- vadd.f32 q6, q8, q10 @
- vmul.f32 q8, q14, q2
- vst2.32 {q4,q5}, [r7, :128]!
- vmul.f32 q10, q15, q3
- vmul.f32 q9, q13, q3
- vmul.f32 q11, q12, q2
- vmul.f32 q14, q14, q3
- vst2.32 {q6,q7}, [r9, :128]!
- vmul.f32 q15, q15, q2
- vmul.f32 q12, q12, q3
- vmul.f32 q13, q13, q2
- vadd.f32 q10, q10, q8
- vsub.f32 q11, q11, q9
- vld1.32 {q8,q9}, [r4, :128]
- vsub.f32 q14, q15, q14
- vadd.f32 q15, q13, q12
- vadd.f32 q13, q11, q10
- vadd.f32 q12, q15, q14
- vsub.f32 q15, q15, q14
- vsub.f32 q14, q11, q10
- vld1.32 {q10,q11}, [r6, :128]
- vadd.f32 q0, q8, q13
- vadd.f32 q1, q9, q12
- vsub.f32 q2, q10, q15 @
- vadd.f32 q3, q11, q14 @
- vsub.f32 q4, q8, q13
- vst2.32 {q0,q1}, [r4, :128]!
- vsub.f32 q5, q9, q12
- vadd.f32 q6, q10, q15 @
- vst2.32 {q2,q3}, [r6, :128]!
- vsub.f32 q7, q11, q14 @
- vst2.32 {q4,q5}, [r8, :128]!
- vst2.32 {q6,q7}, [r10, :128]!
- bne neon_x8_t_loop
-
- @bx lr
+ mov r11, #0
+ add r3, r0, #0 @ data0
+ add r5, r0, r1, lsl #1 @ data2
+ add r4, r0, r1 @ data1
+ add r7, r5, r1, lsl #1 @ data4
+ add r6, r5, r1 @ data3
+ add r9, r7, r1, lsl #1 @ data6
+ add r8, r7, r1 @ data5
+ add r10, r9, r1 @ data7
+ add r12, r2, #0 @ LUT
+
+ sub r11, r11, r1, lsr #5
+1:
+ vld1.32 {q2, q3}, [r12, :128]!
+ vld1.32 {q14, q15}, [r6, :128]
+ vld1.32 {q10, q11}, [r5, :128]
+ adds r11, r11, #1
+ vmul.f32 q12, q15, q2
+ vmul.f32 q8, q14, q3
+ vmul.f32 q13, q14, q2
+ vmul.f32 q9, q10, q3
+ vmul.f32 q1, q10, q2
+ vmul.f32 q0, q11, q2
+ vmul.f32 q14, q11, q3
+ vmul.f32 q15, q15, q3
+ vld1.32 {q2, q3}, [r12, :128]!
+ vsub.f32 q10, q12, q8
+ vadd.f32 q11, q0, q9
+ vadd.f32 q8, q15, q13
+ vld1.32 {q12, q13}, [r4, :128]
+ vsub.f32 q9, q1, q14
+ vsub.f32 q15, q11, q10
+ vsub.f32 q14, q9, q8
+ vsub.f32 q4, q12, q15
+ vadd.f32 q6, q12, q15
+ vadd.f32 q5, q13, q14
+ vsub.f32 q7, q13, q14
+ vld1.32 {q14, q15}, [r9, :128]
+ vld1.32 {q12, q13}, [r7, :128]
+ vmul.f32 q1, q14, q2
+ vmul.f32 q0, q14, q3
+ vst1.32 {q4, q5}, [r4, :128]
+ vmul.f32 q14, q15, q3
+ vmul.f32 q4, q15, q2
+ vadd.f32 q15, q9, q8
+ vst1.32 {q6, q7}, [r6, :128]
+ vmul.f32 q8, q12, q3
+ vmul.f32 q5, q13, q3
+ vmul.f32 q12, q12, q2
+ vmul.f32 q9, q13, q2
+ vadd.f32 q14, q14, q1
+ vsub.f32 q13, q4, q0
+ vadd.f32 q0, q9, q8
+ vld1.32 {q8, q9}, [r3, :128]
+ vadd.f32 q1, q11, q10
+ vsub.f32 q12, q12, q5
+ vadd.f32 q11, q8, q15
+ vsub.f32 q8, q8, q15
+ vadd.f32 q2, q12, q14
+ vsub.f32 q10, q0, q13
+ vadd.f32 q15, q0, q13
+ vadd.f32 q13, q9, q1
+ vsub.f32 q9, q9, q1
+ vsub.f32 q12, q12, q14
+ vadd.f32 q0, q11, q2
+ vadd.f32 q1, q13, q15
+ vsub.f32 q4, q11, q2
+ vsub.f32 q2, q8, q10
+ vadd.f32 q3, q9, q12
+ vst2.32 {q0, q1}, [r3, :128]!
+ vsub.f32 q5, q13, q15
+ vld1.32 {q14, q15}, [r10, :128]
+ vsub.f32 q7, q9, q12
+ vld1.32 {q12, q13}, [r8, :128]
+ vst2.32 {q2, q3}, [r5, :128]!
+ vld1.32 {q2, q3}, [r12, :128]!
+ vadd.f32 q6, q8, q10
+ vmul.f32 q8, q14, q2
+ vst2.32 {q4, q5}, [r7, :128]!
+ vmul.f32 q10, q15, q3
+ vmul.f32 q9, q13, q3
+ vmul.f32 q11, q12, q2
+ vmul.f32 q14, q14, q3
+ vst2.32 {q6, q7}, [r9, :128]!
+ vmul.f32 q15, q15, q2
+ vmul.f32 q12, q12, q3
+ vmul.f32 q13, q13, q2
+ vadd.f32 q10, q10, q8
+ vsub.f32 q11, q11, q9
+ vld1.32 {q8, q9}, [r4, :128]
+ vsub.f32 q14, q15, q14
+ vadd.f32 q15, q13, q12
+ vadd.f32 q13, q11, q10
+ vadd.f32 q12, q15, q14
+ vsub.f32 q15, q15, q14
+ vsub.f32 q14, q11, q10
+ vld1.32 {q10, q11}, [r6, :128]
+ vadd.f32 q0, q8, q13
+ vadd.f32 q1, q9, q12
+ vsub.f32 q2, q10, q15
+ vadd.f32 q3, q11, q14
+ vsub.f32 q4, q8, q13
+ vst2.32 {q0, q1}, [r4, :128]!
+ vsub.f32 q5, q9, q12
+ vadd.f32 q6, q10, q15
+ vst2.32 {q2, q3}, [r6, :128]!
+ vsub.f32 q7, q11, q14
+ vst2.32 {q4, q5}, [r8, :128]!
+ vst2.32 {q6, q7}, [r10, :128]!
+ bne 1b
@ assumes r0 = out
@ r1 = in ?
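Two assembler-level cleanups recur throughout this file: the new .fpu neon directive lets GNU as accept NEON mnemonics regardless of the -mfpu flag passed on the command line, and the named loop labels (neon_x8_loop, _neon_ee_loop, ...) become numeric local labels, "1:" with a backward branch "bne 1b", which can be reused in every routine without polluting the symbol table. The stray trailing "@" markers on selected vector instructions are also dropped, and the indentation is normalized to tabs.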
@@ -329,80 +326,80 @@ neon_x8_t_loop:
@ r3-r10 = data pointers
@ r11 = loop iterations
@ r2 & lr = temps
- .align 4
+ .align 4
#ifdef __APPLE__
- .globl _neon_ee
+ .globl _neon_ee
_neon_ee:
#else
- .globl neon_ee
+ .globl neon_ee
neon_ee:
#endif
- vld1.32 {d16, d17}, [r2, :128]
-_neon_ee_loop:
- vld2.32 {q15}, [r10, :128]!
- vld2.32 {q13}, [r8, :128]!
- vld2.32 {q14}, [r7, :128]!
- vld2.32 {q9}, [r4, :128]!
- vld2.32 {q10}, [r3, :128]!
- vld2.32 {q11}, [r6, :128]!
- vld2.32 {q12}, [r5, :128]!
- vsub.f32 q1, q14, q13
- vld2.32 {q0}, [r9, :128]!
- subs r11, r11, #1
- vsub.f32 q2, q0, q15
- vadd.f32 q0, q0, q15
- vmul.f32 d10, d2, d17
- vmul.f32 d11, d3, d16
- vmul.f32 d12, d3, d17
- vmul.f32 d6, d4, d17
- vmul.f32 d7, d5, d16
- vmul.f32 d8, d4, d16
- vmul.f32 d9, d5, d17
- vmul.f32 d13, d2, d16
- vsub.f32 d7, d7, d6
- vadd.f32 d11, d11, d10
- vsub.f32 q1, q12, q11
- vsub.f32 q2, q10, q9
- vadd.f32 d6, d9, d8
- vadd.f32 q4, q14, q13
- vadd.f32 q11, q12, q11
- vadd.f32 q12, q10, q9
- vsub.f32 d10, d13, d12
- vsub.f32 q7, q4, q0
- vsub.f32 q9, q12, q11
- vsub.f32 q13, q5, q3
- vadd.f32 d29, d5, d2 @
- vadd.f32 q5, q5, q3
- vadd.f32 q10, q4, q0
- vadd.f32 q11, q12, q11
- vsub.f32 d31, d5, d2 @
- vsub.f32 d28, d4, d3 @
- vadd.f32 d30, d4, d3 @
- vadd.f32 d5, d19, d14 @-
- vadd.f32 d7, d31, d26 @-
- vadd.f32 q1, q14, q5
- vadd.f32 q0, q11, q10
- vsub.f32 d6, d30, d27 @-
- vsub.f32 d4, d18, d15 @-
- vsub.f32 d13, d19, d14 @-
- vadd.f32 d12, d18, d15 @-
- vsub.f32 d15, d31, d26 @-
- ldr r2, [r12], #4
- vtrn.32 q1, q3
- ldr lr, [r12], #4
- vtrn.32 q0, q2
- add r2, r0, r2, lsl #2
- vsub.f32 q4, q11, q10
- add lr, r0, lr, lsl #2
- vsub.f32 q5, q14, q5
- vadd.f32 d14, d30, d27 @-
- vst2.32 {q0,q1}, [r2, :128]!
- vst2.32 {q2,q3}, [lr, :128]!
- vtrn.32 q4, q6
- vtrn.32 q5, q7
- vst2.32 {q4,q5}, [r2, :128]!
- vst2.32 {q6,q7}, [lr, :128]!
- bne _neon_ee_loop
+ vld1.32 {d16, d17}, [r2, :128]
+1:
+ vld2.32 {q15}, [r10, :128]!
+ vld2.32 {q13}, [r8, :128]!
+ vld2.32 {q14}, [r7, :128]!
+ vld2.32 {q9}, [r4, :128]!
+ vld2.32 {q10}, [r3, :128]!
+ vld2.32 {q11}, [r6, :128]!
+ vld2.32 {q12}, [r5, :128]!
+ vsub.f32 q1, q14, q13
+ vld2.32 {q0}, [r9, :128]!
+ subs r11, r11, #1
+ vsub.f32 q2, q0, q15
+ vadd.f32 q0, q0, q15
+ vmul.f32 d10, d2, d17
+ vmul.f32 d11, d3, d16
+ vmul.f32 d12, d3, d17
+ vmul.f32 d6, d4, d17
+ vmul.f32 d7, d5, d16
+ vmul.f32 d8, d4, d16
+ vmul.f32 d9, d5, d17
+ vmul.f32 d13, d2, d16
+ vsub.f32 d7, d7, d6
+ vadd.f32 d11, d11, d10
+ vsub.f32 q1, q12, q11
+ vsub.f32 q2, q10, q9
+ vadd.f32 d6, d9, d8
+ vadd.f32 q4, q14, q13
+ vadd.f32 q11, q12, q11
+ vadd.f32 q12, q10, q9
+ vsub.f32 d10, d13, d12
+ vsub.f32 q7, q4, q0
+ vsub.f32 q9, q12, q11
+ vsub.f32 q13, q5, q3
+ vadd.f32 d29, d5, d2
+ vadd.f32 q5, q5, q3
+ vadd.f32 q10, q4, q0
+ vadd.f32 q11, q12, q11
+ vsub.f32 d31, d5, d2
+ vsub.f32 d28, d4, d3
+ vadd.f32 d30, d4, d3
+ vadd.f32 d5, d19, d14
+ vadd.f32 d7, d31, d26
+ vadd.f32 q1, q14, q5
+ vadd.f32 q0, q11, q10
+ vsub.f32 d6, d30, d27
+ vsub.f32 d4, d18, d15
+ vsub.f32 d13, d19, d14
+ vadd.f32 d12, d18, d15
+ vsub.f32 d15, d31, d26
+ ldr r2, [r12], #4
+ vtrn.32 q1, q3
+ ldr lr, [r12], #4
+ vtrn.32 q0, q2
+ add r2, r0, r2, lsl #2
+ vsub.f32 q4, q11, q10
+ add lr, r0, lr, lsl #2
+ vsub.f32 q5, q14, q5
+ vadd.f32 d14, d30, d27
+ vst2.32 {q0, q1}, [r2, :128]!
+ vst2.32 {q2, q3}, [lr, :128]!
+ vtrn.32 q4, q6
+ vtrn.32 q5, q7
+ vst2.32 {q4, q5}, [r2, :128]!
+ vst2.32 {q6, q7}, [lr, :128]!
+ bne 1b
@ assumes r0 = out
@
@@ -410,57 +407,57 @@ _neon_ee_loop:
@ r3-r10 = data pointers
@ r11 = loop iterations
@ r2 & lr = temps
- .align 4
+ .align 4
#ifdef __APPLE__
- .globl _neon_oo
+ .globl _neon_oo
_neon_oo:
#else
- .globl neon_oo
+ .globl neon_oo
neon_oo:
#endif
-_neon_oo_loop:
- vld2.32 {q8}, [r6, :128]!
- vld2.32 {q9}, [r5, :128]!
- vld2.32 {q10}, [r4, :128]!
- vld2.32 {q13}, [r3, :128]!
- vadd.f32 q11, q9, q8
- vsub.f32 q8, q9, q8
- vsub.f32 q9, q13, q10
- vadd.f32 q12, q13, q10
- subs r11, r11, #1
- vld2.32 {q10}, [r7, :128]!
- vld2.32 {q13}, [r9, :128]!
- vsub.f32 q2, q12, q11
- vsub.f32 d7, d19, d16 @
- vadd.f32 d3, d19, d16 @
- vadd.f32 d6, d18, d17 @
- vsub.f32 d2, d18, d17 @
- vld2.32 {q9}, [r8, :128]!
- vld2.32 {q8}, [r10, :128]!
- vadd.f32 q0, q12, q11
- vadd.f32 q11, q13, q8
- vadd.f32 q12, q10, q9
- vsub.f32 q8, q13, q8
- vsub.f32 q9, q10, q9
- vsub.f32 q6, q12, q11
- vadd.f32 q4, q12, q11
- vtrn.32 q0, q2
- ldr r2, [r12], #4
- vsub.f32 d15, d19, d16 @
- ldr lr, [r12], #4
- vadd.f32 d11, d19, d16 @
- vadd.f32 d14, d18, d17 @
- vsub.f32 d10, d18, d17 @
- add r2, r0, r2, lsl #2
- vtrn.32 q1, q3
- add lr, r0, lr, lsl #2
- vst2.32 {q0,q1}, [r2, :128]!
- vst2.32 {q2,q3}, [lr, :128]!
- vtrn.32 q4, q6
- vtrn.32 q5, q7
- vst2.32 {q4,q5}, [r2, :128]!
- vst2.32 {q6,q7}, [lr, :128]!
- bne _neon_oo_loop
+1:
+ vld2.32 {q8}, [r6, :128]!
+ vld2.32 {q9}, [r5, :128]!
+ vld2.32 {q10}, [r4, :128]!
+ vld2.32 {q13}, [r3, :128]!
+ vadd.f32 q11, q9, q8
+ vsub.f32 q8, q9, q8
+ vsub.f32 q9, q13, q10
+ vadd.f32 q12, q13, q10
+ subs r11, r11, #1
+ vld2.32 {q10}, [r7, :128]!
+ vld2.32 {q13}, [r9, :128]!
+ vsub.f32 q2, q12, q11
+ vsub.f32 d7, d19, d16
+ vadd.f32 d3, d19, d16
+ vadd.f32 d6, d18, d17
+ vsub.f32 d2, d18, d17
+ vld2.32 {q9}, [r8, :128]!
+ vld2.32 {q8}, [r10, :128]!
+ vadd.f32 q0, q12, q11
+ vadd.f32 q11, q13, q8
+ vadd.f32 q12, q10, q9
+ vsub.f32 q8, q13, q8
+ vsub.f32 q9, q10, q9
+ vsub.f32 q6, q12, q11
+ vadd.f32 q4, q12, q11
+ vtrn.32 q0, q2
+ ldr r2, [r12], #4
+ vsub.f32 d15, d19, d16
+ ldr lr, [r12], #4
+ vadd.f32 d11, d19, d16
+ vadd.f32 d14, d18, d17
+ vsub.f32 d10, d18, d17
+ add r2, r0, r2, lsl #2
+ vtrn.32 q1, q3
+ add lr, r0, lr, lsl #2
+ vst2.32 {q0, q1}, [r2, :128]!
+ vst2.32 {q2, q3}, [lr, :128]!
+ vtrn.32 q4, q6
+ vtrn.32 q5, q7
+ vst2.32 {q4, q5}, [r2, :128]!
+ vst2.32 {q6, q7}, [lr, :128]!
+ bne 1b
@ assumes r0 = out
@
@@ -468,81 +465,80 @@ _neon_oo_loop:
@ r3-r10 = data pointers
@ r11 = addr of twiddle
@ r2 & lr = temps
- .align 4
+ .align 4
#ifdef __APPLE__
- .globl _neon_eo
+ .globl _neon_eo
_neon_eo:
#else
- .globl neon_eo
+ .globl neon_eo
neon_eo:
#endif
- vld2.32 {q9}, [r5, :128]! @tag2
- vld2.32 {q13}, [r3, :128]! @tag0
- vld2.32 {q12}, [r4, :128]! @tag1
- vld2.32 {q0}, [r7, :128]! @tag4
- vsub.f32 q11, q13, q12
- vld2.32 {q8}, [r6, :128]! @tag3
- vadd.f32 q12, q13, q12
- vsub.f32 q10, q9, q8
- vadd.f32 q8, q9, q8
- vadd.f32 q9, q12, q8
- vadd.f32 d9, d23, d20 @
- vsub.f32 d11, d23, d20 @
- vsub.f32 q8, q12, q8
- vsub.f32 d8, d22, d21 @
- vadd.f32 d10, d22, d21 @
- ldr r2, [r12], #4
- vld1.32 {d20, d21}, [r11, :128]
- ldr lr, [r12], #4
- vtrn.32 q9, q4
- add r2, r0, r2, lsl #2
- vtrn.32 q8, q5
- add lr, r0, lr, lsl #2
- vswp d9,d10
- vst1.32 {d8,d9,d10,d11}, [lr, :128]!
- vld2.32 {q13}, [r10, :128]! @tag7
- vld2.32 {q15}, [r9, :128]! @tag6
- vld2.32 {q11}, [r8, :128]! @tag5
- vsub.f32 q14, q15, q13
- vsub.f32 q12, q0, q11
- vadd.f32 q11, q0, q11
- vadd.f32 q13, q15, q13
- vadd.f32 d13, d29, d24 @
- vadd.f32 q15, q13, q11
- vsub.f32 d12, d28, d25 @
- vsub.f32 d15, d29, d24 @
- vadd.f32 d14, d28, d25 @
- vtrn.32 q15, q6
- vsub.f32 q15, q13, q11
- vtrn.32 q15, q7
- vswp d13, d14
- vst1.32 {d12,d13,d14,d15}, [lr, :128]!
- vtrn.32 q13, q14
- vtrn.32 q11, q12
- vmul.f32 d24, d26, d21
- vmul.f32 d28, d27, d20
- vmul.f32 d25, d26, d20
- vmul.f32 d26, d27, d21
- vmul.f32 d27, d22, d21
- vmul.f32 d30, d23, d20
- vmul.f32 d29, d23, d21
- vmul.f32 d22, d22, d20
- vsub.f32 d21, d28, d24
- vadd.f32 d20, d26, d25
- vadd.f32 d25, d30, d27
- vsub.f32 d24, d22, d29
- vadd.f32 q11, q12, q10
- vsub.f32 q10, q12, q10
- vadd.f32 q0, q9, q11
- vsub.f32 q2, q9, q11
- vadd.f32 d3, d17, d20 @
- vsub.f32 d7, d17, d20 @
- vsub.f32 d2, d16, d21 @
- vadd.f32 d6, d16, d21 @
- vswp d1, d2
- vswp d5, d6
- vstmia r2!, {q0-q3}
-
+ vld2.32 {q9}, [r5, :128]!
+ vld2.32 {q13}, [r3, :128]!
+ vld2.32 {q12}, [r4, :128]!
+ vld2.32 {q0}, [r7, :128]!
+ vsub.f32 q11, q13, q12
+ vld2.32 {q8}, [r6, :128]!
+ vadd.f32 q12, q13, q12
+ vsub.f32 q10, q9, q8
+ vadd.f32 q8, q9, q8
+ vadd.f32 q9, q12, q8
+ vadd.f32 d9, d23, d20
+ vsub.f32 d11, d23, d20
+ vsub.f32 q8, q12, q8
+ vsub.f32 d8, d22, d21
+ vadd.f32 d10, d22, d21
+ ldr r2, [r12], #4
+ vld1.32 {d20, d21}, [r11, :128]
+ ldr lr, [r12], #4
+ vtrn.32 q9, q4
+ add r2, r0, r2, lsl #2
+ vtrn.32 q8, q5
+ add lr, r0, lr, lsl #2
+ vswp d9, d10
+ vst1.32 {d8, d9, d10, d11}, [lr, :128]!
+ vld2.32 {q13}, [r10, :128]!
+ vld2.32 {q15}, [r9, :128]!
+ vld2.32 {q11}, [r8, :128]!
+ vsub.f32 q14, q15, q13
+ vsub.f32 q12, q0, q11
+ vadd.f32 q11, q0, q11
+ vadd.f32 q13, q15, q13
+ vadd.f32 d13, d29, d24
+ vadd.f32 q15, q13, q11
+ vsub.f32 d12, d28, d25
+ vsub.f32 d15, d29, d24
+ vadd.f32 d14, d28, d25
+ vtrn.32 q15, q6
+ vsub.f32 q15, q13, q11
+ vtrn.32 q15, q7
+ vswp d13, d14
+ vst1.32 {d12, d13, d14, d15}, [lr, :128]!
+ vtrn.32 q13, q14
+ vtrn.32 q11, q12
+ vmul.f32 d24, d26, d21
+ vmul.f32 d28, d27, d20
+ vmul.f32 d25, d26, d20
+ vmul.f32 d26, d27, d21
+ vmul.f32 d27, d22, d21
+ vmul.f32 d30, d23, d20
+ vmul.f32 d29, d23, d21
+ vmul.f32 d22, d22, d20
+ vsub.f32 d21, d28, d24
+ vadd.f32 d20, d26, d25
+ vadd.f32 d25, d30, d27
+ vsub.f32 d24, d22, d29
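+ @ the eight vmul.f32 plus the four vadd/vsub above form two complex
+ @ multiplications by the twiddle factor in d20/d21 (loaded from [r11])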
+ vadd.f32 q11, q12, q10
+ vsub.f32 q10, q12, q10
+ vadd.f32 q0, q9, q11
+ vsub.f32 q2, q9, q11
+ vadd.f32 d3, d17, d20
+ vsub.f32 d7, d17, d20
+ vsub.f32 d2, d16, d21
+ vadd.f32 d6, d16, d21
+ vswp d1, d2
+ vswp d5, d6
+ vstmia r2!, {q0-q3}
@ assumes r0 = out
@
@@ -550,189 +546,385 @@ neon_eo:
@ r3-r10 = data pointers
@ r11 = addr of twiddle
@ r2 & lr = temps
- .align 4
+ .align 4
#ifdef __APPLE__
- .globl _neon_oe
+ .globl _neon_oe
_neon_oe:
#else
- .globl neon_oe
+ .globl neon_oe
neon_oe:
#endif
- vld1.32 {q8}, [r5, :128]!
- vld1.32 {q10}, [r6, :128]!
- vld2.32 {q11}, [r4, :128]!
- vld2.32 {q13}, [r3, :128]!
- vld2.32 {q15}, [r10, :128]!
- vorr d25, d17, d17
- vorr d24, d20, d20
- vorr d20, d16, d16
- vsub.f32 q9, q13, q11
- vadd.f32 q11, q13, q11
- ldr r2, [r12], #4
- vtrn.32 d24, d25
- ldr lr, [r12], #4
- vtrn.32 d20, d21
- add r2, r0, r2, lsl #2
- vsub.f32 q8, q10, q12
- add lr, r0, lr, lsl #2
- vadd.f32 q10, q10, q12
- vadd.f32 q0, q11, q10
- vadd.f32 d25, d19, d16 @
- vsub.f32 d27, d19, d16 @
- vsub.f32 q1, q11, q10
- vsub.f32 d24, d18, d17 @
- vadd.f32 d26, d18, d17 @
- vtrn.32 q0, q12
- vtrn.32 q1, q13
- vld1.32 {d24, d25}, [r11, :128]
- vswp d1, d2
- vst1.32 {q0, q1}, [r2, :128]!
- vld2.32 {q0}, [r9, :128]!
- vadd.f32 q1, q0, q15
- vld2.32 {q13}, [r8, :128]!
- vld2.32 {q14}, [r7, :128]!
- vsub.f32 q15, q0, q15
- vsub.f32 q0, q14, q13
- vadd.f32 q3, q14, q13
- vadd.f32 q2, q3, q1
- vadd.f32 d29, d1, d30 @
- vsub.f32 d27, d1, d30 @
- vsub.f32 q3, q3, q1
- vsub.f32 d28, d0, d31 @
- vadd.f32 d26, d0, d31 @
- vtrn.32 q2, q14
- vtrn.32 q3, q13
- vswp d5, d6
- vst1.32 {q2, q3}, [r2, :128]!
- vtrn.32 q11, q9
- vtrn.32 q10, q8
- vmul.f32 d20, d18, d25
- vmul.f32 d22, d19, d24
- vmul.f32 d21, d19, d25
- vmul.f32 d18, d18, d24
- vmul.f32 d19, d16, d25
- vmul.f32 d30, d17, d24
- vmul.f32 d23, d16, d24
- vmul.f32 d24, d17, d25
- vadd.f32 d17, d22, d20
- vsub.f32 d16, d18, d21
- vsub.f32 d21, d30, d19
- vadd.f32 d20, d24, d23
- vadd.f32 q9, q8, q10
- vsub.f32 q8, q8, q10
- vadd.f32 q4, q14, q9
- vsub.f32 q6, q14, q9
- vadd.f32 d11, d27, d16 @
- vsub.f32 d15, d27, d16 @
- vsub.f32 d10, d26, d17 @
- vadd.f32 d14, d26, d17 @
- vswp d9, d10
- vswp d13, d14
- vstmia lr!, {q4-q7}
-
-
- .align 4
+ vld1.32 {q8}, [r5, :128]!
+ vld1.32 {q10}, [r6, :128]!
+ vld2.32 {q11}, [r4, :128]!
+ vld2.32 {q13}, [r3, :128]!
+ vld2.32 {q15}, [r10, :128]!
+ vorr d25, d17, d17
+ vorr d24, d20, d20
+ vorr d20, d16, d16
+ vsub.f32 q9, q13, q11
+ vadd.f32 q11, q13, q11
+ ldr r2, [r12], #4
+ vtrn.32 d24, d25
+ ldr lr, [r12], #4
+ vtrn.32 d20, d21
+ add r2, r0, r2, lsl #2
+ vsub.f32 q8, q10, q12
+ add lr, r0, lr, lsl #2
+ vadd.f32 q10, q10, q12
+ vadd.f32 q0, q11, q10
+ vadd.f32 d25, d19, d16
+ vsub.f32 d27, d19, d16
+ vsub.f32 q1, q11, q10
+ vsub.f32 d24, d18, d17
+ vadd.f32 d26, d18, d17
+ vtrn.32 q0, q12
+ vtrn.32 q1, q13
+ vld1.32 {d24, d25}, [r11, :128]
+ vswp d1, d2
+ vst1.32 {q0, q1}, [r2, :128]!
+ vld2.32 {q0}, [r9, :128]!
+ vadd.f32 q1, q0, q15
+ vld2.32 {q13}, [r8, :128]!
+ vld2.32 {q14}, [r7, :128]!
+ vsub.f32 q15, q0, q15
+ vsub.f32 q0, q14, q13
+ vadd.f32 q3, q14, q13
+ vadd.f32 q2, q3, q1
+ vadd.f32 d29, d1, d30
+ vsub.f32 d27, d1, d30
+ vsub.f32 q3, q3, q1
+ vsub.f32 d28, d0, d31
+ vadd.f32 d26, d0, d31
+ vtrn.32 q2, q14
+ vtrn.32 q3, q13
+ vswp d5, d6
+ vst1.32 {q2, q3}, [r2, :128]!
+ vtrn.32 q11, q9
+ vtrn.32 q10, q8
+ vmul.f32 d20, d18, d25
+ vmul.f32 d22, d19, d24
+ vmul.f32 d21, d19, d25
+ vmul.f32 d18, d18, d24
+ vmul.f32 d19, d16, d25
+ vmul.f32 d30, d17, d24
+ vmul.f32 d23, d16, d24
+ vmul.f32 d24, d17, d25
+ vadd.f32 d17, d22, d20
+ vsub.f32 d16, d18, d21
+ vsub.f32 d21, d30, d19
+ vadd.f32 d20, d24, d23
+ vadd.f32 q9, q8, q10
+ vsub.f32 q8, q8, q10
+ vadd.f32 q4, q14, q9
+ vsub.f32 q6, q14, q9
+ vadd.f32 d11, d27, d16
+ vsub.f32 d15, d27, d16
+ vsub.f32 d10, d26, d17
+ vadd.f32 d14, d26, d17
+ vswp d9, d10
+ vswp d13, d14
+ vstmia lr!, {q4-q7}
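+ @ the vswp pair above moves register halves into memory order so a
+ @ single vstmia can write q4-q7 as one contiguous block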
+
+ .align 4
#ifdef __APPLE__
- .globl _neon_end
+ .globl _neon_end
_neon_end:
#else
- .globl neon_end
+ .globl neon_end
neon_end:
#endif
- bx lr
-
+ bx lr
- .align 4
+ .align 4
#ifdef __APPLE__
- .globl _neon_transpose
-_neon_transpose:
+ .globl _neon_transpose4
+_neon_transpose4:
#else
- .globl neon_transpose
-neon_transpose:
+ .globl neon_transpose4
+neon_transpose4:
#endif
- push {r4-r8}
- @ vpush {q8-q9}
- mov r5, r3
-_neon_transpose_col:
- mov r7, r1
- add r8, r1, r3, lsl #3
- mov r4, r2
- add r6, r0, r2, lsl #3
-_neon_transpose_row:
- vld1.32 {q8,q9}, [r0, :128]!
-@ vld1.32 {q10,q11}, [r0, :128]!
- vld1.32 {q12,q13}, [r6, :128]!
-@ vld1.32 {q14,q15}, [r6, :128]!
- sub r4, r4, #4
- cmp r4, #0
- vswp d17,d24
- vswp d19,d26
- vswp d21,d28
- vswp d23,d30
- vst1.32 {q8}, [r7, :128]
- vst1.32 {q12}, [r8, :128]
- add r7, r7, r3, lsl #4
- add r8, r8, r3, lsl #4
- vst1.32 {q9}, [r7, :128]
- vst1.32 {q13}, [r8, :128]
- add r7, r7, r3, lsl #4
- add r8, r8, r3, lsl #4
-@@vst1.32 {q10}, [r7, :128]
-@@vst1.32 {q14}, [r8, :128]
-@@add r7, r7, r3, lsl #4
-@@add r8, r8, r3, lsl #4
-@@vst1.32 {q11}, [r7, :128]
-@@vst1.32 {q15}, [r8, :128]
-@@add r7, r7, r3, lsl #4
-@@add r8, r8, r3, lsl #4
- bne _neon_transpose_row
- sub r5, r5, #2
- cmp r5, #0
- add r0, r0, r2, lsl #3
- add r1, r1, #16
- bne _neon_transpose_col
- @ vpop {q8-q9}
- pop {r4-r8}
- bx lr
-
- .align 4
+ push {r4-r6, lr}
+ mov r5, r3
+1:
+ mov ip, r1
+ add lr, r1, r3, lsl #3
+ mov r4, r2
+ add r6, r0, r2, lsl #3
+2:
+	vld1.32 {q8, q9}, [r0, :128]!
+	vld1.32 {q12, q13}, [r6, :128]!
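+ @ row loop: transpose two rows, four complex floats per pass; each
+ @ vswp exchanges the high d-register of a row-0 quad with the low
+ @ d-register of the matching row-1 quad (a 2x2 complex-block transpose)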
+ subs r4, r4, #4
+ vswp d17, d24
+ vswp d19, d26
+ vswp d21, d28
+ vswp d23, d30
+ vst1.32 {q8}, [ip, :128]
+ vst1.32 {q12}, [lr, :128]
+ add ip, ip, r3, lsl #4
+ add lr, lr, r3, lsl #4
+ vst1.32 {q9}, [ip, :128]
+ vst1.32 {q13}, [lr, :128]
+ add ip, ip, r3, lsl #4
+ add lr, lr, r3, lsl #4
+ bne 2b
+ subs r5, r5, #2
+ add r0, r0, r2, lsl #3
+ add r1, r1, #16
+ bne 1b
+ pop {r4-r6, pc}
+
+ .align 4
#ifdef __APPLE__
- .globl _neon_transpose_to_buf
-_neon_transpose_to_buf:
+ .globl _neon_transpose8
+_neon_transpose8:
#else
- .globl neon_transpose_to_buf
-neon_transpose_to_buf:
+ .globl neon_transpose8
+neon_transpose8:
#endif
- push {r4-r10}
- mov r5, #8
-_neon_transpose_to_buf_col:
- mov r4, #8
- add r6, r0, r2, lsl #3
- mov r7, r1
- add r8, r1, #64
- add r9, r1, #128
- add r10, r1, #192
-_neon_transpose_to_buf_row:
- vld1.32 {q8,q9}, [r0, :128]!
- vld1.32 {q12,q13}, [r6, :128]!
- sub r4, r4, #4
- cmp r4, #0
- vswp d17,d24
- vswp d19,d26
- vst1.32 {q8}, [r7, :128]
- vst1.32 {q12}, [r8, :128]
- vst1.32 {q9}, [r9, :128]
- vst1.32 {q13}, [r10, :128]
- add r7, r7, #256
- add r8, r8, #256
- add r9, r9, #256
- add r10, r10, #256
- bne _neon_transpose_to_buf_row
- sub r5, r5, #2
- cmp r5, #0
- sub r0, r0, #64
- add r0, r0, r2, lsl #4
- add r1, r1, #16
- bne _neon_transpose_to_buf_col
- pop {r4-r10}
- bx lr
+ push {r4-r12, lr}
+ vpush {q4-q7}
+
+ @ initialize
+ lsl r2, r2, #3
+ mul lr, r2, r3
+ lsl r3, r3, #5
+ add r4, r0, r2
+ lsl ip, r2, #1
+ add r5, r1, r3, lsr #2
+ add r6, r1, r3, lsr #1
+ add r7, r5, r3, lsr #1
+ sub lr, r3, lr
+ sub ip, ip, #64
+ sub r8, r3, #48
+ add lr, lr, #16
+1:
+ @ process all but the last one
+ subs r11, r2, #64
+
+ @ prefetch next rows 0-5
+ pld [r0]
+ pld [r4]
+ pld [r0, r2, lsl #1]
+ pld [r4, r2, lsl #1]
+ pld [r0, r2, lsl #2]
+ pld [r4, r2, lsl #2]
+
+	@ if only the last one remains
+ beq 3f
+2:
+ @ matrix 0&2 row 0-1
+ vld1.32 {q0, q1}, [r0, :128]!
+ vld1.32 {q2, q3}, [r4, :128]!
+ vswp d1, d4
+ vswp d3, d6
+ vst1.32 {q0}, [r1, :128]!
+ vst1.32 {q2}, [r5, :128]!
+ vst1.32 {q1}, [r6, :128]!
+ vst1.32 {q3}, [r7, :128]!
+
+ @ matrix 1&3 row 0-1
+ vld1.32 {q4, q5}, [r0, :128]!
+ vld1.32 {q6, q7}, [r4, :128]!
+ vswp d9, d12
+ vswp d11, d14
+
+ @ prefetch next rows 0-1
+ pld [r0]
+ pld [r4]
+ add r9, r0, ip
+ add r10, r4, ip
+
+ @ matrix 0&2, row 2-3
+ vld1.32 {q0, q1}, [r9, :128]!
+ vld1.32 {q2, q3}, [r10, :128]!
+ vswp d1, d4
+ vswp d3, d6
+ vst1.32 {q0}, [r1, :128]!
+ vst1.32 {q2}, [r5, :128]!
+ vst1.32 {q1}, [r6, :128]!
+ vst1.32 {q3}, [r7, :128]!
+
+ @ matrix 1&3, row 2-3
+ vld1.32 {q8, q9}, [r9, :128]!
+ vld1.32 {q10, q11}, [r10, :128]!
+ vswp d17, d20
+ vswp d19, d22
+
+ @ prefetch next rows 2-3
+ pld [r9]
+ pld [r10]
+ add r9, r9, ip
+ add r10, r10, ip
+
+ @ matrix 0&2, row 4-5
+ vld1.32 {q0, q1}, [r9, :128]!
+ vld1.32 {q2, q3}, [r10, :128]!
+ vswp d1, d4
+ vswp d3, d6
+ vst1.32 {q0}, [r1, :128]!
+ vst1.32 {q2}, [r5, :128]!
+ vst1.32 {q1}, [r6, :128]!
+ vst1.32 {q3}, [r7, :128]!
+
+ @ matrix 1&3, row 4-5
+ vld1.32 {q12, q13}, [r9, :128]!
+ vld1.32 {q14, q15}, [r10, :128]!
+ vswp d25, d28
+ vswp d27, d30
+
+ @ prefetch next rows 4-5
+ pld [r9]
+ pld [r10]
+ add r9, r9, ip
+ add r10, r10, ip
+
+ @ matrix 0&2, row 6-7
+ vld1.32 {q0, q1}, [r9, :128]!
+ vld1.32 {q2, q3}, [r10, :128]!
+ vswp d1, d4
+ vswp d3, d6
+ vst1.32 {q0}, [r1, :128], r8
+ vst1.32 {q2}, [r5, :128], r8
+ vst1.32 {q1}, [r6, :128], r8
+ vst1.32 {q3}, [r7, :128], r8
+
+ @ matrix 1&3, row 6-7
+ vld1.32 {q0, q1}, [r9, :128]!
+ vld1.32 {q2, q3}, [r10, :128]!
+ vswp d1, d4
+ vswp d3, d6
+
+ @ prefetch next rows 6-7
+ pld [r9]
+ pld [r10]
+
+ subs r11, r11, #64
+
+ @ these could be replaced with VSTM, but that requires swaps
+ vst1.32 {q4}, [r1, :128]!
+ vst1.32 {q8}, [r1, :128]!
+ vst1.32 {q12}, [r1, :128]!
+ vst1.32 {q0}, [r1, :128], r8
+
+ vst1.32 {q6}, [r5, :128]!
+ vst1.32 {q10}, [r5, :128]!
+ vst1.32 {q14}, [r5, :128]!
+ vst1.32 {q2}, [r5, :128], r8
+
+ vst1.32 {q5}, [r6, :128]!
+ vst1.32 {q9}, [r6, :128]!
+ vst1.32 {q13}, [r6, :128]!
+ vst1.32 {q1}, [r6, :128], r8
+
+ vst1.32 {q7}, [r7, :128]!
+ vst1.32 {q11}, [r7, :128]!
+ vst1.32 {q15}, [r7, :128]!
+ vst1.32 {q3}, [r7, :128], r8
+
+	@ process all but the last one in this row
+ bne 2b
+3:
+ @ process the last one
+ subs r3, r3, #256
+
+ @ matrix 0&2 row 0-1
+ vld1.32 {q0, q1}, [r0, :128]!
+ vld1.32 {q2, q3}, [r4, :128]!
+ vswp d1, d4
+ vswp d3, d6
+ vst1.32 {q0}, [r1, :128]!
+ vst1.32 {q2}, [r5, :128]!
+ vst1.32 {q1}, [r6, :128]!
+ vst1.32 {q3}, [r7, :128]!
+
+ @ matrix 1&3 row 0-1
+ vld1.32 {q4, q5}, [r0, :128]!
+ vld1.32 {q6, q7}, [r4, :128]!
+ vswp d9, d12
+ vswp d11, d14
+ add r9, r0, ip
+ add r10, r4, ip
+
+ @ matrix 0&2, row 2-3
+ vld1.32 {q0, q1}, [r9, :128]!
+ vld1.32 {q2, q3}, [r10, :128]!
+ vswp d1, d4
+ vswp d3, d6
+ vst1.32 {q0}, [r1, :128]!
+ vst1.32 {q2}, [r5, :128]!
+ vst1.32 {q1}, [r6, :128]!
+ vst1.32 {q3}, [r7, :128]!
+
+ @ matrix 1&3, row 2-3
+ vld1.32 {q8, q9}, [r9, :128]!
+ vld1.32 {q10, q11}, [r10, :128]!
+ vswp d17, d20
+ vswp d19, d22
+ add r9, r9, ip
+ add r10, r10, ip
+
+ @ matrix 0&2, row 4-5
+ vld1.32 {q0, q1}, [r9, :128]!
+ vld1.32 {q2, q3}, [r10, :128]!
+ vswp d1, d4
+ vswp d3, d6
+ vst1.32 {q0}, [r1, :128]!
+ vst1.32 {q2}, [r5, :128]!
+ vst1.32 {q1}, [r6, :128]!
+ vst1.32 {q3}, [r7, :128]!
+
+ @ matrix 1&3, row 4-5
+ vld1.32 {q12, q13}, [r9, :128]!
+ vld1.32 {q14, q15}, [r10, :128]!
+ vswp d25, d28
+ vswp d27, d30
+ add r9, r9, ip
+ add r10, r10, ip
+
+ @ matrix 0&2, row 6-7
+ vld1.32 {q0, q1}, [r9, :128]!
+ vld1.32 {q2, q3}, [r10, :128]!
+ vswp d1, d4
+ vswp d3, d6
+ vst1.32 {q0}, [r1, :128], r8
+ vst1.32 {q2}, [r5, :128], r8
+ vst1.32 {q1}, [r6, :128], r8
+ vst1.32 {q3}, [r7, :128], r8
+
+ @ matrix 1&3, row 6-7
+ vld1.32 {q0, q1}, [r9, :128]!
+ vld1.32 {q2, q3}, [r10, :128]!
+ vswp d1, d4
+ vswp d3, d6
+
+ @ next row starts right after
+ mov r0, r10
+ add r4, r10, r2
+
+ @ these could be replaced with VSTM, but that requires swaps
+ vst1.32 {q4}, [r1, :128]!
+ vst1.32 {q8}, [r1, :128]!
+ vst1.32 {q12}, [r1, :128]!
+ vst1.32 {q0}, [r1, :128], lr
+
+ vst1.32 {q6}, [r5, :128]!
+ vst1.32 {q10}, [r5, :128]!
+ vst1.32 {q14}, [r5, :128]!
+ vst1.32 {q2}, [r5, :128], lr
+
+ vst1.32 {q5}, [r6, :128]!
+ vst1.32 {q9}, [r6, :128]!
+ vst1.32 {q13}, [r6, :128]!
+ vst1.32 {q1}, [r6, :128], lr
+
+ vst1.32 {q7}, [r7, :128]!
+ vst1.32 {q11}, [r7, :128]!
+ vst1.32 {q15}, [r7, :128]!
+ vst1.32 {q3}, [r7, :128], lr
+
+ @ process all columns
+ bne 1b
+
+ vpop {q4-q7}
+ pop {r4-r12, pc}
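
Both transpose kernels above move complex (re, im) pairs in 2x2 blocks:
load a quad from each of two rows, swap the off-diagonal 64-bit halves,
and store back to two columns. A rough C equivalent of one block step,
sketched with NEON intrinsics (the helper name is ours, not from FFTS):

    #include <arm_neon.h>

    /* Swap the high half of `a` with the low half of `b`; each
       float32x4_t holds two complex floats, so this transposes a
       2x2 block of complex elements, like the vswp d-register
       exchanges in the assembly above. */
    static inline void transpose2x2_cplx(float32x4_t *a, float32x4_t *b)
    {
        float32x2_t hi_a = vget_high_f32(*a);
        float32x2_t lo_b = vget_low_f32(*b);
        *a = vcombine_f32(vget_low_f32(*a), lo_b);
        *b = vcombine_f32(hi_a, vget_high_f32(*b));
    }
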
diff --git a/lib/ffts/src/neon_float.h b/lib/ffts/src/neon_float.h
deleted file mode 100644
index a958b8a..0000000
--- a/lib/ffts/src/neon_float.h
+++ /dev/null
@@ -1,1126 +0,0 @@
-/*
-
- This file is part of FFTS -- The Fastest Fourier Transform in the South
-
- Copyright (c) 2012, Anthony M. Blake <amb@anthonix.com>
- Copyright (c) 2012, The University of Waikato
-
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions are met:
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in the
- documentation and/or other materials provided with the distribution.
- * Neither the name of the organization nor the
- names of its contributors may be used to endorse or promote products
- derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
- ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- DISCLAIMED. IN NO EVENT SHALL ANTHONY M. BLAKE BE LIABLE FOR ANY
- DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-*/
-#ifndef __NEON_FLOAT_H__
-#define __NEON_FLOAT_H__
-
-#include <arm_neon.h>
-
-//#define VL 4
-#define __INLINE static inline __attribute__((always_inline))
-
-typedef float32x4_t V;
-
-typedef float32x4x2_t VS;
-
-#if defined(complex)
- typedef complex float cdata_t;
-#else
- typedef float cdata_t[2];
-#endif
- typedef float data_t;
-
-#define ADD vaddq_f32
-#define SUB vsubq_f32
-#define MUL vmulq_f32
-#define VADD vaddq_f32
-#define VSUB vsubq_f32
-#define VMUL vmulq_f32
-#define VXOR(x,y) (vreinterpretq_f32_u32(veorq_u32(vreinterpretq_u32_f32(x), vreinterpretq_u32_f32(y))))
-#define VST vst1q_f32
-#define VLD vld1q_f32
-#define VST2 vst2q_f32
-#define VLD2 vld2q_f32
-
-#define VSWAPPAIRS(x) (vrev64q_f32(x))
-
-#define VUNPACKHI(a,b) (vcombine_f32(vget_high_f32(a), vget_high_f32(b)))
-#define VUNPACKLO(a,b) (vcombine_f32(vget_low_f32(a), vget_low_f32(b)))
-
-#define VBLEND(x,y) (vcombine_f32(vget_low_f32(x), vget_high_f32(y)))
-
-__INLINE V VLIT4(data_t f3, data_t f2, data_t f1, data_t f0) {
- data_t __attribute__ ((aligned(16))) d[4] = {f0, f1, f2, f3};
- return VLD(d);
-}
-
-#define VDUPRE(r) vcombine_f32(vdup_lane_f32(vget_low_f32(r),0), vdup_lane_f32(vget_high_f32(r),0))
-#define VDUPIM(r) vcombine_f32(vdup_lane_f32(vget_low_f32(r),1), vdup_lane_f32(vget_high_f32(r),1))
-
-#define FFTS_MALLOC(d,a) (valloc(d))
-#define FFTS_FREE(d) (free(d))
-__INLINE void FMA(V *Rd, V Rn, V Rm) {
- *Rd = vmlaq_f32(*Rd, Rn, Rm);
-// __asm__ ("vmla.f32 %q0,%q1,%q2\n\t"
-// : "+w" (*Rd)
-// : "w" (Rn), "w" (Rm)
-// //: "0"
-// );
-
-}
-__INLINE void FMS(V *Rd, V Rn, V Rm) {
- *Rd = vmlsq_f32(*Rd, Rn, Rm);
-// __asm__ ("vmls.f32 %q0,%q1,%q2\n\t"
-// : "+w" (*Rd)
-// : "w" (Rn), "w" (Rm)
-// // : "0"
-// );
-}
-
-__INLINE VS VSMUL(VS *d, VS *w) {
- VS t;
- t.val[0] = vmulq_f32(d->val[0], w->val[0]);
- t.val[1] = vmulq_f32(d->val[0], w->val[1]);
-// t.val[0] = vmlsq_f32(t.val[0], d->val[1], w->val[1]);
-// t.val[1] = vmlaq_f32(t.val[1], d->val[1], w->val[0]);
- FMS(&t.val[0], d->val[1], w->val[1]);
- FMA(&t.val[1], d->val[1], w->val[0]);
- return t;
-}
-__INLINE VS VSMULJ(VS *d, VS *w) {
- VS t;
- t.val[0] = vmulq_f32(d->val[0], w->val[0]);
- t.val[1] = vmulq_f32(d->val[1], w->val[0]);
-// t.val[0] = vmlaq_f32(t.val[0], d->val[1], w->val[1]);
-// t.val[1] = vmlsq_f32(t.val[1], d->val[0], w->val[1]);
- FMA(&t.val[0], d->val[1], w->val[1]);
- FMS(&t.val[1], d->val[0], w->val[1]);
- return t;
-}
-__INLINE VS VSADD(VS *a, VS *b) {
- VS r;
- r.val[0] = vaddq_f32(a->val[0], b->val[0]);
- r.val[1] = vaddq_f32(a->val[1], b->val[1]);
- return r;
-}
-__INLINE VS VSSUB(VS *a, VS *b) {
- VS r;
- r.val[0] = vsubq_f32(a->val[0], b->val[0]);
- r.val[1] = vsubq_f32(a->val[1], b->val[1]);
- return r;
-}
-__INLINE VS VSSUB_MULI(VS *a, VS *b) {
- VS r;
- r.val[0] = vaddq_f32(a->val[0], b->val[1]);
- r.val[1] = vsubq_f32(a->val[1], b->val[0]);
- return r;
-}
-__INLINE VS VSADD_MULI(VS *a, VS *b) {
- VS r;
- r.val[0] = vsubq_f32(a->val[0], b->val[1]);
- r.val[1] = vaddq_f32(a->val[1], b->val[0]);
- return r;
-}
-
-__INLINE void VSK_N(VS w, VS *r0, VS *r1, VS *r2, VS *r3) {
- VS uk, uk2, zk_p, zk_n, zk, zk_d;
- uk = *r0; uk2 = *r1;
- zk_p = VSMUL(r2, &w);
- zk_n = VSMULJ(r3, &w);
-
- zk = VSADD(&zk_p, &zk_n);
- zk_d = VSSUB(&zk_p, &zk_n);
-
- *r2 = VSSUB(&uk, &zk);
- *r0 = VSADD(&uk, &zk);
- *r3 = VSADD_MULI(&uk2, &zk_d);
- *r1 = VSSUB_MULI(&uk2, &zk_d);
-}
-
-
-__INLINE float32x2x2_t HVS_ADD(float32x2x2_t a, float32x2x2_t b) {
- float32x2x2_t rval;
- rval.val[0] = vadd_f32(a.val[0], b.val[0]);
- rval.val[1] = vadd_f32(a.val[1], b.val[1]);
- return rval;
-}
-__INLINE float32x2x2_t HVS_SUB(float32x2x2_t a, float32x2x2_t b) {
- float32x2x2_t rval;
- rval.val[0] = vsub_f32(a.val[0], b.val[0]);
- rval.val[1] = vsub_f32(a.val[1], b.val[1]);
- return rval;
-}
-__INLINE float32x2x2_t HVS_SUB_MULI(float32x2x2_t a, float32x2x2_t b) {
- float32x2x2_t rval;
- rval.val[0] = vadd_f32(a.val[0], b.val[1]);
- rval.val[1] = vsub_f32(a.val[1], b.val[0]);
- return rval;
-}
-__INLINE float32x2x2_t HVS_ADD_MULI(float32x2x2_t a, float32x2x2_t b) {
- float32x2x2_t rval;
- rval.val[0] = vsub_f32(a.val[0], b.val[1]);
- rval.val[1] = vadd_f32(a.val[1], b.val[0]);
- return rval;
-}
-__INLINE float32x2x2_t HVS_MUL(float32x2x2_t d, float32x2x2_t w) {
- float32x2x2_t t;
- t.val[0] = vmul_f32(d.val[0], w.val[0]);
- t.val[1] = vmul_f32(d.val[0], w.val[1]);
- t.val[0] = vmls_f32(t.val[0], d.val[1], w.val[1]);
- t.val[1] = vmla_f32(t.val[1], d.val[1], w.val[0]);
- return t;
-}
-__INLINE float32x2x2_t HVS_MULJ(float32x2x2_t d, float32x2x2_t w) {
- float32x2x2_t t;
- t.val[0] = vmul_f32(d.val[0], w.val[0]);
- t.val[1] = vmul_f32(d.val[1], w.val[0]);
- t.val[0] = vmla_f32(t.val[0], d.val[1], w.val[1]);
- t.val[1] = vmls_f32(t.val[1], d.val[0], w.val[1]);
- return t;
-}
-__INLINE void HVS_K_N(float32x2x2_t w, float32x2x2_t *r0, float32x2x2_t *r1, float32x2x2_t *r2, float32x2x2_t *r3) {
- float32x2x2_t uk, uk2, zk_p, zk_n, zk, zk_d;
- uk = *r0; uk2 = *r1;
- zk_p = HVS_MUL(*r2, w);
- zk_n = HVS_MULJ(*r3, w);
- zk = HVS_ADD(zk_p, zk_n);
- zk_d = HVS_SUB(zk_p, zk_n);
-
- *r2 = HVS_SUB(uk, zk);
- *r0 = HVS_ADD(uk, zk);
- *r3 = HVS_ADD_MULI(uk2, zk_d);
- *r1 = HVS_SUB_MULI(uk2, zk_d);
-}
-
-typedef union {
- float32x4_t f32x4;
- float32x2x2_t f32x2x2;
-} float_mixed_t;
-
-__INLINE void VSWP(float32x2x2_t *a, float32x2x2_t *b) {
-//float32x2_t tmp = a->val[1];
-//a->val[1] = b->val[0];
-//b->val[0] = tmp;
- __asm__ ("vswp %0,%1\n\t"
- : "+w" (a->val[1]), "+w" (b->val[0])
- :
- );
-}
-
-static const __attribute__ ((aligned(16))) float ee_w_data[4] = {0.70710678118654757273731092936941,0.70710678118654746171500846685376,
- -0.70710678118654757273731092936941,-0.70710678118654746171500846685376};
-__INLINE void LEAF_EE8_SPLIT(size_t ** restrict is, const data_t * restrict in, size_t ** restrict out_offsets, data_t * restrict out) {
- data_t *out0 = out + (*out_offsets)[0];
- data_t *out1 = out + (*out_offsets)[1];
- *out_offsets += 2;
-
- float32x2x2_t r0, r1, r2, r3, r4, r5, r6, r7;
- float32x2x2_t t0, t1, t2, t3, t4, t5, t6, t7;
-
- t0 = vld2_f32(in + (*is)[0]); t1 = vld2_f32(in + (*is)[1]); t2 = vld2_f32(in + (*is)[2]); t3 = vld2_f32(in + (*is)[3]);
-
- t4 = HVS_ADD (t0, t1);
- t5 = HVS_SUB (t0, t1);
- t6 = HVS_ADD (t2, t3);
- t7 = HVS_SUB (t2, t3);
- r0 = HVS_ADD (t4, t6);
- r2 = HVS_SUB (t4, t6);
- r1 = HVS_SUB_MULI(t5, t7);
- r3 = HVS_ADD_MULI(t5, t7);
-
- t0 = vld2_f32(in + (*is)[4]); t1 = vld2_f32(in + (*is)[5]); t2 = vld2_f32(in + (*is)[6]); t3 = vld2_f32(in + (*is)[7]);
- r4 = HVS_ADD (t0, t1);
- r5 = HVS_SUB (t0, t1);
- r6 = HVS_ADD (t2, t3);
- r7 = HVS_SUB (t2, t3);
- t0 = r0; t1 = r2;
- t2 = HVS_ADD(r4, r6);
- t3 = HVS_SUB(r4, r6);
- r0 = HVS_ADD(t0, t2);
- r4 = HVS_SUB(t0, t2);
- r2 = HVS_SUB_MULI(t1, t3);
- r6 = HVS_ADD_MULI(t1, t3);
-
- float32x4_t w = vld1q_f32(ee_w_data);
- float32x2x2_t ww;
- ww.val[0] = vget_low_f32(w);
- ww.val[1] = vget_high_f32(w);
-
- HVS_K_N(ww,&r1,&r3,&r5,&r7);
-
-//vst2_f32(out0, r0);
-//vst2_f32(out0+4, r2);
-//vst2_f32(out0+8, r4);
-//vst2_f32(out0+12, r6);
-
-//vst2_f32(out1, r1);
-//vst2_f32(out1+4, r3);
-//vst2_f32(out1+8, r5);
-//vst2_f32(out1+12, r7);
-
- float32x2x2_t tt0, tt1, tt2, tt3, tt4, tt5, tt6, tt7;
-
- tt0 = vtrn_f32(r0.val[0], r0.val[1]);
- tt1 = vtrn_f32(r1.val[0], r1.val[1]);
- tt2 = vtrn_f32(r2.val[0], r2.val[1]);
- tt3 = vtrn_f32(r3.val[0], r3.val[1]);
- tt4 = vtrn_f32(r4.val[0], r4.val[1]);
- tt5 = vtrn_f32(r5.val[0], r5.val[1]);
- tt6 = vtrn_f32(r6.val[0], r6.val[1]);
- tt7 = vtrn_f32(r7.val[0], r7.val[1]);
-
-//VSWP(&tt0.f32x2x2, &tt1.f32x2x2);
-//VSWP(&tt2.f32x2x2, &tt3.f32x2x2);
-//VSWP(&tt4.f32x2x2, &tt5.f32x2x2);
-//VSWP(&tt6.f32x2x2, &tt7.f32x2x2);
-
- float32x4_t z0, z1, z2, z3, z4, z5, z6, z7;
-
- z0 = vcombine_f32(tt0.val[0], tt1.val[0]);
- z1 = vcombine_f32(tt0.val[1], tt1.val[1]);
- z2 = vcombine_f32(tt2.val[0], tt3.val[0]);
- z3 = vcombine_f32(tt2.val[1], tt3.val[1]);
- z4 = vcombine_f32(tt4.val[0], tt5.val[0]);
- z5 = vcombine_f32(tt4.val[1], tt5.val[1]);
- z6 = vcombine_f32(tt6.val[0], tt7.val[0]);
- z7 = vcombine_f32(tt6.val[1], tt7.val[1]);
-
-
- vst1q_f32(out0, z0);
- vst1q_f32(out0+4, z2);
- vst1q_f32(out0+8, z4);
- vst1q_f32(out0+12, z6);
-
- vst1q_f32(out1, z1);
- vst1q_f32(out1+4, z3);
- vst1q_f32(out1+8, z5);
- vst1q_f32(out1+12, z7);
-/*
- vst1_f32(out0, tt0.val[0]);
- vst1_f32(out0+2, tt1.val[0]);
- vst1_f32(out0+4, tt2.val[0]);
- vst1_f32(out0+6, tt3.val[0]);
- vst1_f32(out0+8, tt4.val[0]);
- vst1_f32(out0+10, tt5.val[0]);
- vst1_f32(out0+12, tt6.val[0]);
- vst1_f32(out0+14, tt7.val[0]);
-
- vst1_f32(out1, tt0.val[1]);
- vst1_f32(out1+2, tt1.val[1]);
- vst1_f32(out1+4, tt2.val[1]);
- vst1_f32(out1+6, tt3.val[1]);
- vst1_f32(out1+8, tt4.val[1]);
- vst1_f32(out1+10, tt5.val[1]);
- vst1_f32(out1+12, tt6.val[1]);
- vst1_f32(out1+14, tt7.val[1]);
- */
-/*
- float32x4_t rr0 = vcombine_f32(r0.val[0], r0.val[1]);
- float32x4_t rr1 = vcombine_f32(r1.val[0], r1.val[1]);
- float32x4_t rr2 = vcombine_f32(r2.val[0], r2.val[1]);
- float32x4_t rr3 = vcombine_f32(r3.val[0], r3.val[1]);
-
- float32x4x2_t tmp0, tmp1, tmp2, tmp3;
- tmp0 = vtrnq_f32(rr0, rr2);
- tmp1 = vtrnq_f32(rr1, rr3);
-
-
- float32x2x2_t v0, v1, v2, v3;
- v0.val[0] = vget_low_f32(tmp0.val[0]);
- v0.val[1] = vget_high_f32(tmp0.val[0]);
- v1.val[0] = vget_low_f32(tmp0.val[1]);
- v1.val[1] = vget_high_f32(tmp0.val[1]);
- v2.val[0] = vget_low_f32(tmp1.val[0]);
- v2.val[1] = vget_high_f32(tmp1.val[0]);
- v3.val[0] = vget_low_f32(tmp1.val[1]);
- v3.val[1] = vget_high_f32(tmp1.val[1]);
-
- tmp2.val[0] = tmp0.val[0];
- tmp2.val[1] = tmp1.val[0];
- tmp3.val[0] = tmp0.val[1];
- tmp3.val[1] = tmp1.val[1];
-
-//vst2q_f32(out0 , tmp2);
-//vst2q_f32(out1 , tmp3);
- vst2_f32(out0, v0);
- vst2_f32(out0+4, v1);
- vst2_f32(out1, v2);
- vst2_f32(out1+4, v3);
-
- float32x4_t rr4 = vcombine_f32(r4.val[0], r4.val[1]);
- float32x4_t rr5 = vcombine_f32(r5.val[0], r5.val[1]);
- float32x4_t rr6 = vcombine_f32(r6.val[0], r6.val[1]);
- float32x4_t rr7 = vcombine_f32(r7.val[0], r7.val[1]);
-
- tmp0 = vtrnq_f32(rr4, rr6);
- tmp1 = vtrnq_f32(rr5, rr7);
-
- tmp2.val[0] = tmp0.val[0];
- tmp2.val[1] = tmp1.val[0];
- tmp3.val[0] = tmp0.val[1];
- tmp3.val[1] = tmp1.val[1];
- v0.val[0] = vget_low_f32(tmp0.val[0]);
- v0.val[1] = vget_high_f32(tmp0.val[0]);
- v1.val[0] = vget_low_f32(tmp0.val[1]);
- v1.val[1] = vget_high_f32(tmp0.val[1]);
- v2.val[0] = vget_low_f32(tmp1.val[0]);
- v2.val[1] = vget_high_f32(tmp1.val[0]);
- v3.val[0] = vget_low_f32(tmp1.val[1]);
- v3.val[1] = vget_high_f32(tmp1.val[1]);
- vst2_f32(out0+8, v0);
- vst2_f32(out0+12, v1);
- vst2_f32(out1+8, v1);
- vst2_f32(out1+12, v3);
-
-//vst2q_f32(out0 + 8, tmp2);
-//vst2q_f32(out1 + 8, tmp3);
-//vst1q_f32(out0+8, tmp0.val[0]);
-//vst1q_f32(out0+12,tmp0.val[1]);
-//vst1q_f32(out1+8, tmp1.val[0]);
-//vst1q_f32(out1+12,tmp1.val[1]);
- */
- *is += 8;
-}
-
-__INLINE void STORESPR(data_t * addr, VS p) {
- __asm__ __volatile__ ("vst1.32 {%q1,%q2}, [%0, :128]\n\t"
- :
- : "r" (addr), "w" (p.val[0]), "w" (p.val[1])
- : "memory");
-}
-__INLINE void STORESPRI(data_t * restrict * addr, V p0, V p1) {
- __asm__ __volatile__ ("vst1.32 {%q1,%q2}, [%0, :128]!\n\t"
- : "+r" (*addr)
- : "w" (p0), "w" (p1)
- : "memory");
-}
-__INLINE void STORESPRI0(data_t * restrict *addr, VS r) {
- register V p0 __asm__ ("q0") = r.val[0];
- register V p1 __asm__ ("q1") = r.val[1];
- __asm__ __volatile__ ("vst1.32 {%q1,%q2}, [%0, :128]!\n\t"
- : "+r" (*addr)
- : "w" (p0), "w" (p1)
- : "memory");
- //STORESPRI(addr, p0, p1);
-}
-__INLINE void STORESPRI1(data_t **addr, VS r) {
- register V p0 __asm__ ("q2") = r.val[0];
- register V p1 __asm__ ("q3") = r.val[1];
- __asm__ __volatile__ ("vst1.32 {%q1,%q2}, [%0, :128]!\n\t"
- : "+r" (*addr)
- : "w" (p0), "w" (p1)
- : "memory");
- //STORESPRI(addr, p0, p1);
-}
-__INLINE void STORESPRI2(data_t **addr, VS r) {
- register V p0 __asm__ ("q4") = r.val[0];
- register V p1 __asm__ ("q5") = r.val[1];
- __asm__ __volatile__ ("vst1.32 {%q1,%q2}, [%0, :128]!\n\t"
- : "+r" (*addr)
- : "w" (p0), "w" (p1)
- : "memory");
- //STORESPRI(addr, p0, p1);
-}
-__INLINE void STORESPRI3(data_t **addr, VS r) {
- register V p0 __asm__ ("q6") = r.val[0];
- register V p1 __asm__ ("q7") = r.val[1];
- __asm__ __volatile__ ("vst1.32 {%q1,%q2}, [%0, :128]!\n\t"
- : "+r" (*addr)
- : "w" (p0), "w" (p1)
- : "memory");
- //STORESPRI(addr, p0, p1);
-}
-__INLINE void STORESPRIT0(data_t * restrict *addr, VS r) {
- register V p0 __asm__ ("q0") = r.val[0];
- register V p1 __asm__ ("q1") = r.val[1];
- __asm__ __volatile__ ("vst2.32 {%q1,%q2}, [%0, :128]!\n\t"
- : "+r" (*addr)
- : "w" (p0), "w" (p1)
- : "memory");
- //STORESPRI(addr, p0, p1);
-}
-__INLINE void STORESPRIT1(data_t **addr, VS r) {
- register V p0 __asm__ ("q2") = r.val[0];
- register V p1 __asm__ ("q3") = r.val[1];
- __asm__ __volatile__ ("vst2.32 {%q1,%q2}, [%0, :128]!\n\t"
- : "+r" (*addr)
- : "w" (p0), "w" (p1)
- : "memory");
- //STORESPRI(addr, p0, p1);
-}
-__INLINE void STORESPRIT2(data_t **addr, VS r) {
- register V p0 __asm__ ("q4") = r.val[0];
- register V p1 __asm__ ("q5") = r.val[1];
- __asm__ __volatile__ ("vst2.32 {%q1,%q2}, [%0, :128]!\n\t"
- : "+r" (*addr)
- : "w" (p0), "w" (p1)
- : "memory");
- //STORESPRI(addr, p0, p1);
-}
-__INLINE void STORESPRIT3(data_t **addr, VS r) {
- register V p0 __asm__ ("q6") = r.val[0];
- register V p1 __asm__ ("q7") = r.val[1];
- __asm__ __volatile__ ("vst2.32 {%q1,%q2}, [%0, :128]!\n\t"
- : "+r" (*addr)
- : "w" (p0), "w" (p1)
- : "memory");
- //STORESPRI(addr, p0, p1);
-}
-__INLINE void STORESPR0(data_t *addr, VS r) {
- register V p0 __asm__ ("q0") = r.val[0];
- register V p1 __asm__ ("q1") = r.val[1];
- __asm__ __volatile__ ("vst1.32 {%q1,%q2}, [%0, :128]\n\t"
- :
- : "r" (addr), "w" (p0), "w" (p1)
- : "memory");
-}
-__INLINE void STORESPR1(data_t *addr, VS r) {
- register V p0 __asm__ ("q2") = r.val[0];
- register V p1 __asm__ ("q3") = r.val[1];
- __asm__ __volatile__ ("vst1.32 {%q1,%q2}, [%0, :128]\n\t"
- :
- : "r" (addr), "w" (p0), "w" (p1)
- : "memory");
-}
-__INLINE void STORESPR2(data_t *addr, VS r) {
- register V p0 __asm__ ("q4") = r.val[0];
- register V p1 __asm__ ("q5") = r.val[1];
- __asm__ __volatile__ ("vst1.32 {%q1,%q2}, [%0, :128]\n\t"
- :
- : "r" (addr), "w" (p0), "w" (p1)
- : "memory");
-}
-__INLINE void STORESPR3(data_t *addr, VS r) {
- register V p0 __asm__ ("q6") = r.val[0];
- register V p1 __asm__ ("q7") = r.val[1];
- __asm__ __volatile__ ("vst1.32 {%q1,%q2}, [%0, :128]\n\t"
- :
- : "r" (addr), "w" (p0), "w" (p1)
- : "memory");
-}
-__INLINE VS LOADSPR0(data_t *addr) {
- VS r;
- register V p0 __asm__ ("q8") ;
- register V p1 __asm__ ("q9") ;
- __asm__ __volatile__("vld1.32 {%q0,%q1}, [%2, :128]\n\t"
- : "=&w" (p0), "=&w" (p1)
- : "r" (addr)
- );
- r.val[0] = p0; r.val[1] = p1;
- return r;
-}
-__INLINE VS LOADSPR1(data_t *addr) {
- VS r;
- register V p0 __asm__ ("q10") ;
- register V p1 __asm__ ("q11") ;
- __asm__ __volatile__("vld1.32 {%q0,%q1}, [%2, :128]\n\t"
- : "=&w" (p0), "=&w" (p1)
- : "r" (addr)
- );
- r.val[0] = p0; r.val[1] = p1;
- return r;
-}
-__INLINE VS LOADSPR2(data_t *addr) {
- VS r;
- register V p0 __asm__ ("q12") ;
- register V p1 __asm__ ("q13") ;
- __asm__ __volatile__("vld1.32 {%q0,%q1}, [%2, :128]\n\t"
- : "=&w" (p0), "=&w" (p1)
- : "r" (addr)
- );
- r.val[0] = p0; r.val[1] = p1;
- return r;
-}
-__INLINE VS LOADSPR3(data_t *addr) {
- VS r;
- register V p0 __asm__ ("q14") ;
- register V p1 __asm__ ("q15") ;
- __asm__ __volatile__("vld1.32 {%q0,%q1}, [%2, :128]\n\t"
- : "=&w" (p0), "=&w" (p1)
- : "r" (addr)
- );
- r.val[0] = p0; r.val[1] = p1;
- return r;
-}
-__INLINE VS LOADSPRI(data_t * restrict * addr) {
- VS r;
- register V p0 __asm__ ("q2") ;
- register V p1 __asm__ ("q3") ;
- __asm__ __volatile__("vld1.32 {%q0,%q1}, [%2, :128]!\n\t"
- : "=&w" (p0), "=&w" (p1), "+r" (*addr)
- :
- );
- r.val[0] = p0; r.val[1] = p1;
- return r;
-}
-
-__INLINE void X_4_SPLIT(data_t * restrict data, size_t N, data_t * restrict LUT) {
-
-//size_t i;
-//for(i=0;i<N/4/2/2;i++) {
- VS uk = LOADSPR0(data);
- VS uk2 = LOADSPR1(data + 2*N/4);
- VS zk_p = LOADSPR2(data + 4*N/4);
- VS zk_n = LOADSPR3(data + 6*N/4);
-
- VSK_N(LOADSPRI(&LUT), &uk, &uk2, &zk_p, &zk_n);
-
- STORESPR0(data, uk);
- STORESPR1(data + 2*N/4, uk2);
- STORESPR2(data + 4*N/4, zk_p);
- STORESPR3(data + 6*N/4, zk_n);
-
-// LUT += 8;
-// data += 8;
-// }
-}
-
-__INLINE void X_8_SPLIT(data_t * restrict data0, size_t N, data_t * restrict LUT) {
- data_t *data2 = data0 + 2*N/4;
- data_t *data4 = data0 + 4*N/4;
- data_t *data6 = data0 + 6*N/4;
- data_t *data1 = data0 + 1*N/4;
- data_t *data3 = data0 + 3*N/4;
- data_t *data5 = data0 + 5*N/4;
- data_t *data7 = data0 + 7*N/4;
- size_t k, n4 = N/4;
-
- for(k=N/8/2/2;k>0;--k) {
- VS r0, r1, r2, r3, r4, r5, r6, r7,w;
- r0 = LOADSPR0(data0);
- r2 = LOADSPR1(data2);
- r1 = LOADSPR2(data1);
- r3 = LOADSPR3(data3);
- VSK_N(LOADSPRI(&LUT), &r0, &r1, &r2, &r3);
- STORESPR2(data1, r1);
- STORESPR3(data3, r3);
- r4 = LOADSPR2(data4);
- r6 = LOADSPR3(data6);
- VSK_N(LOADSPRI(&LUT), &r0, &r2, &r4, &r6);
- STORESPRI0(&data0, r0); //data0 += 8;
- STORESPRI1(&data2, r2); //data2 += 8;
- STORESPRI2(&data4, r4); //data4 += 8;
- STORESPRI3(&data6, r6); //data6 += 8;
- r1 = LOADSPR0(data1);
- r3 = LOADSPR1(data3);
- r5 = LOADSPR2(data5);
- r7 = LOADSPR3(data7);
- VSK_N(LOADSPRI(&LUT), &r1, &r3, &r5, &r7);
- // LUT += 24;
- STORESPRI0(&data1, r1); //data1 += 8;
- STORESPRI1(&data3, r3); //data3 += 8;
- STORESPRI2(&data5, r5); //data5 += 8;
- STORESPRI3(&data7, r7); //data7 += 8;
- }
-}
-
-__INLINE void X_8_SPLIT_T(data_t * restrict data0, size_t N, data_t * restrict LUT) {
- data_t *data2 = data0 + 2*N/4;
- data_t *data4 = data0 + 4*N/4;
- data_t *data6 = data0 + 6*N/4;
- data_t *data1 = data0 + 1*N/4;
- data_t *data3 = data0 + 3*N/4;
- data_t *data5 = data0 + 5*N/4;
- data_t *data7 = data0 + 7*N/4;
- size_t k, n4 = N/4;
-
- for(k=N/8/2/2;k>0;--k) {
- VS r0, r1, r2, r3, r4, r5, r6, r7,w;
- r0 = LOADSPR0(data0);
- r2 = LOADSPR1(data2);
- r1 = LOADSPR2(data1);
- r3 = LOADSPR3(data3);
- VSK_N(LOADSPRI(&LUT), &r0, &r1, &r2, &r3);
- STORESPR2(data1, r1);
- STORESPR3(data3, r3);
- r4 = LOADSPR2(data4);
- r6 = LOADSPR3(data6);
- VSK_N(LOADSPRI(&LUT), &r0, &r2, &r4, &r6);
- STORESPRIT0(&data0, r0); //data0 += 8;
- STORESPRIT1(&data2, r2); //data2 += 8;
- STORESPRIT2(&data4, r4); //data4 += 8;
- STORESPRIT3(&data6, r6); //data6 += 8;
- r1 = LOADSPR0(data1);
- r3 = LOADSPR1(data3);
- r5 = LOADSPR2(data5);
- r7 = LOADSPR3(data7);
- VSK_N(LOADSPRI(&LUT), &r1, &r3, &r5, &r7);
- STORESPRIT0(&data1, r1); //data1 += 8;
- STORESPRIT1(&data3, r3); //data3 += 8;
- STORESPRIT2(&data5, r5); //data5 += 8;
- STORESPRIT3(&data7, r7); //data7 += 8;
- }
-}
-__INLINE V LOAD2I(const data_t **addr) {
- float32x4_t o;
- __asm__ ("vld2.32 {%q0}, [%1, :128]!\n\t"
- : "=w" (o), "+r" (*addr)
- :
- );
-
- return o;
-}
-__INLINE V LOAD2I_0(const data_t **addr) {
- float32x4_t o;
- __asm__ ("vld2.32 {%q0}, [%1, :128]! @tag0\n\t" : "=w" (o), "+r" (*addr) : );
- return o;
-}
-__INLINE V LOAD2I_1(const data_t **addr) {
- float32x4_t o;
- __asm__ ("vld2.32 {%q0}, [%1, :128]! @tag1\n\t" : "=w" (o), "+r" (*addr) : );
- return o;
-}
-__INLINE V LOAD2I_2(const data_t **addr) {
- float32x4_t o;
- __asm__ ("vld2.32 {%q0}, [%1, :128]! @tag2\n\t" : "=w" (o), "+r" (*addr) : );
- return o;
-}
-__INLINE V LOAD2I_3(const data_t **addr) {
- float32x4_t o;
- __asm__ ("vld2.32 {%q0}, [%1, :128]! @tag3\n\t" : "=w" (o), "+r" (*addr) : );
- return o;
-}
-__INLINE V LOAD2I_4(const data_t **addr) {
- float32x4_t o;
- __asm__ ("vld2.32 {%q0}, [%1, :128]! @tag4\n\t" : "=w" (o), "+r" (*addr) : );
- return o;
-}
-__INLINE V LOAD2I_5(const data_t **addr) {
- float32x4_t o;
- __asm__ ("vld2.32 {%q0}, [%1, :128]! @tag5\n\t" : "=w" (o), "+r" (*addr) : );
- return o;
-}
-__INLINE V LOAD2I_6(const data_t **addr) {
- float32x4_t o;
- __asm__ ("vld2.32 {%q0}, [%1, :128]! @tag6\n\t" : "=w" (o), "+r" (*addr) : );
- return o;
-}
-__INLINE V LOAD2I_7(const data_t **addr) {
- float32x4_t o;
- __asm__ ("vld2.32 {%q0}, [%1, :128]! @tag7\n\t" : "=w" (o), "+r" (*addr) : );
- return o;
-}
-
-
-
-__INLINE V LOADI(const data_t **addr) {
- float32x4_t o;
- __asm__ ("vld1.32 {%q0}, [%1, :128]!\n\t" : "=w" (o), "+r" (*addr) : );
- return o;
-}
-__INLINE V LOADI_2(const data_t **addr) {
- float32x4_t o;
- __asm__ ("vld1.32 {%q0}, [%1, :128]!\n\t @tag2" : "=w" (o), "+r" (*addr) : );
- return o;
-}
-__INLINE V LOADI_3(const data_t **addr) {
- float32x4_t o;
- __asm__ ("vld1.32 {%q0}, [%1, :128]!\n\t @tag3" : "=w" (o), "+r" (*addr) : );
- return o;
-}
-__INLINE V HSP_MUL(V *d, const V *w) {
- V t;
- t = vcombine_f32(vmul_f32(vget_low_f32(*d), vget_low_f32(*w)),
- vmul_f32(vget_low_f32(*d), vget_high_f32(*w)));
- t = vcombine_f32(vmls_f32(vget_low_f32(t), vget_high_f32(*d), vget_high_f32(*w)),
- vmla_f32(vget_high_f32(t), vget_high_f32(*d), vget_low_f32(*w)));
- return t;
-}
-__INLINE V HSP_MULJ(V *d, const V *w) {
- V t;
- t = vcombine_f32(vmul_f32(vget_low_f32(*d), vget_low_f32(*w)),
- vmul_f32(vget_high_f32(*d), vget_low_f32(*w)));
- t = vcombine_f32(vmla_f32(vget_low_f32(t), vget_high_f32(*d), vget_high_f32(*w)),
- vmls_f32(vget_high_f32(t), vget_low_f32(*d), vget_high_f32(*w)));
- return t;
-}
-__INLINE V HSP_SUB_MULI(V *a, V *b) {
- return vcombine_f32(vadd_f32(vget_low_f32(*a), vget_high_f32(*b)), vsub_f32(vget_high_f32(*a), vget_low_f32(*b)));
-}
-__INLINE V HSP_ADD_MULI(V *a, V *b) {
- return vcombine_f32(vsub_f32(vget_low_f32(*a), vget_high_f32(*b)), vadd_f32(vget_high_f32(*a), vget_low_f32(*b)));
-}
-
-__INLINE void K_N_HSP(const V *w, V *r0, V *r1, V *r2, V *r3) {
- V uk, uk2, zk_p, zk_n, zk, zk_d;
-
- uk = *r0;
- uk2 = *r1;
- zk_p = HSP_MUL(r2, w);
- zk_n = HSP_MULJ(r3, w);
- zk = ADD(zk_p, zk_n);
- zk_d = SUB(zk_p, zk_n);
-
- *r2 = SUB(uk, zk);
- *r0 = ADD(uk, zk);
- *r3 = HSP_ADD_MULI(&uk2, &zk_d);
- *r1 = HSP_SUB_MULI(&uk2, &zk_d);
-}
-
-__INLINE void neon_shl8_ee(data_t *restrict out0, data_t *restrict out1,const data_t **restrict i0,const data_t **restrict i1,const data_t **restrict i2,const data_t **restrict i3,const data_t **restrict i4,const data_t **restrict i5,const data_t **restrict i6,const data_t **restrict i7) {
-
- V r0, r1, r2, r3, r4, r5, r6, r7;
- V t0, t1, t2, t3, t4, t5, t6, t7;
-
-
- t0 = LOAD2I_0(i0);
- t1 = LOAD2I_1(i1);
- t2 = LOAD2I_2(i2);
- t3 = LOAD2I_3(i3);
- t4 = ADD (t0, t1);
- t5 = SUB (t0, t1);
- t6 = ADD (t2, t3);
- t7 = SUB (t2, t3);
- r0 = ADD (t4, t6);
- r2 = SUB (t4, t6);
- r1 = HSP_SUB_MULI(&t5, &t7);
- r3 = HSP_ADD_MULI(&t5, &t7);
-
- t0 = LOAD2I_4(i4);
- t1 = LOAD2I_5(i5);
- t2 = LOAD2I_6(i6);
- t3 = LOAD2I_7(i7);
- r4 = ADD (t0, t1);
- r5 = SUB (t0, t1);
- r6 = ADD (t2, t3);
- r7 = SUB (t2, t3);
-
- t0 = r0; t1 = r2;
- t2 = ADD(r4, r6);
- t3 = SUB(r4, r6);
- r0 = ADD(t0, t2);
- r4 = SUB(t0, t2);
- r2 = HSP_SUB_MULI(&t1, &t3);
- r6 = HSP_ADD_MULI(&t1, &t3);
-
- V w = vld1q_f32(ee_w_data);
-
- K_N_HSP(&w,&r1,&r3,&r5,&r7);
- V uk, uk2, zk, zk_d;
-
- float32x4x2_t tmp1 = vtrnq_f32(r0, r2);
- r0 = tmp1.val[0];
- r2 = tmp1.val[1];
- float32x4x2_t tmp4 = vtrnq_f32(r1, r3);
- r1 = tmp4.val[0];
- r3 = tmp4.val[1];
- register V tt0 __asm__ ("q0") = r0;
- register V tt1 __asm__ ("q1") = r1;
- register V tt2 __asm__ ("q2") = r2;
- register V tt3 __asm__ ("q3") = r3;
- __asm__ __volatile__ ("vst2.32 {q0,q1}, [%0, :128]!\n\t" : "+&r" (out0): "w"(tt0), "w"(tt1) : "memory");
- __asm__ __volatile__ ("vst2.32 {q2,q3}, [%0, :128]!\n\t" : "+&r" (out1): "w"(tt2), "w"(tt3) : "memory");
-
- float32x4x2_t tmp2 = vtrnq_f32(r4, r6);
- r4 = tmp2.val[0];
- r6 = tmp2.val[1];
- float32x4x2_t tmp3 = vtrnq_f32(r5, r7);
- r5 = tmp3.val[0];
- r7 = tmp3.val[1];
- register V tt4 __asm__ ("q4") = r4;
- register V tt5 __asm__ ("q5") = r5;
- register V tt6 __asm__ ("q6") = r6;
- register V tt7 __asm__ ("q7") = r7;
-
- __asm__ __volatile__ ("vst2.32 {q4,q5}, [%0, :128]!\n\t" : "+&r" (out0): "w"(tt4), "w"(tt5) : "memory");
- __asm__ __volatile__ ("vst2.32 {q6,q7}, [%0, :128]!\n\t" : "+&r" (out1): "w"(tt6), "w"(tt7) : "memory");
-
-}
-
-__INLINE void neon_shl8_oo(data_t *restrict out0, data_t *restrict out1,const data_t **restrict i0,const data_t **restrict i1,const data_t **restrict i2,const data_t **restrict i3,const data_t **restrict i4,const data_t **restrict i5,const data_t **restrict i6,const data_t **restrict i7) {
-
- V r0, r1, r2, r3, r4, r5, r6, r7;
- V t0, t1, t2, t3, t4, t5, t6, t7;
-
- t0 = LOAD2I_0(i0);
- t1 = LOAD2I_1(i1);
- t2 = LOAD2I_2(i2);
- t3 = LOAD2I_3(i3);
- t4 = ADD (t0, t1);
- t5 = SUB (t0, t1);
- t6 = ADD (t2, t3);
- t7 = SUB (t2, t3);
- r0 = ADD (t4, t6);
- r2 = SUB (t4, t6);
- r1 = HSP_SUB_MULI(&t5, &t7);
- r3 = HSP_ADD_MULI(&t5, &t7);
-
- float32x4x2_t tmp1 = vtrnq_f32(r0, r2);
- r0 = tmp1.val[0];
- r2 = tmp1.val[1];
- float32x4x2_t tmp4 = vtrnq_f32(r1, r3);
- r1 = tmp4.val[0];
- r3 = tmp4.val[1];
- register V tt0 __asm__ ("q0") = r0;
- register V tt1 __asm__ ("q1") = r1;
- register V tt2 __asm__ ("q2") = r2;
- register V tt3 __asm__ ("q3") = r3;
- __asm__ __volatile__ ("vst2.32 {q0,q1}, [%0, :128]!\n\t" : "+&r" (out0): "w"(tt0), "w"(tt1) : "memory");
- __asm__ __volatile__ ("vst2.32 {q2,q3}, [%0, :128]!\n\t" : "+&r" (out1): "w"(tt2), "w"(tt3) : "memory");
-
-
-
- t0 = LOAD2I_4(i4);
- t1 = LOAD2I_5(i5);
- t2 = LOAD2I_6(i6);
- t3 = LOAD2I_7(i7);
- t4 = ADD (t0, t1);
- t5 = SUB (t0, t1);
- t6 = ADD (t2, t3);
- t7 = SUB (t2, t3);
- r4 = ADD (t4, t6);
- r6 = SUB (t4, t6);
- r5 = HSP_SUB_MULI(&t5, &t7);
- r7 = HSP_ADD_MULI(&t5, &t7);
-
- float32x4x2_t tmp2 = vtrnq_f32(r4, r6);
- r4 = tmp2.val[0];
- r6 = tmp2.val[1];
- float32x4x2_t tmp3 = vtrnq_f32(r5, r7);
- r5 = tmp3.val[0];
- r7 = tmp3.val[1];
-
-
- register V tt4 __asm__ ("q4") = r4;
- register V tt5 __asm__ ("q5") = r5;
- register V tt6 __asm__ ("q6") = r6;
- register V tt7 __asm__ ("q7") = r7;
-
- __asm__ __volatile__ ("vst2.32 {q4,q5}, [%0, :128]!\n\t" : "+&r" (out0): "w"(tt4), "w"(tt5) : "memory");
- __asm__ __volatile__ ("vst2.32 {q6,q7}, [%0, :128]!\n\t" : "+&r" (out1): "w"(tt6), "w"(tt7) : "memory");
-
-
-
-}
-
-static const __attribute__ ((aligned(16))) data_t eo_w_data[4] = {1.0f,0.70710678118654757273731092936941f, 0.0f,-0.70710678118654746171500846685376};
-
-
-__INLINE void neon_shl8_eo(data_t *restrict out0, data_t *restrict out1,const data_t **restrict i0,const data_t **restrict i1,const data_t **restrict i2,const data_t **restrict i3,const data_t **restrict i4,const data_t **restrict i5,const data_t **restrict i6,const data_t **restrict i7) {
- /*
- register V r0_1 __asm__ ("q0");
- register V r2_3 __asm__ ("q1");
- register V r4_5 __asm__ ("q2");
- register V r6_7 __asm__ ("q3");
- */
- const V w = vld1q_f32(eo_w_data);
-
- V r0_1, r2_3, r4_5, r6_7;
-
- register V r8_9 __asm__ ("q4");
- register V r10_11 __asm__ ("q5");
- register V r12_13 __asm__ ("q6");
- register V r14_15 __asm__ ("q7");
-
- {
- V t0, t1, t2, t3, t4, t5, t6, t7;
- t0 = LOAD2I_0(i0);
- t1 = LOAD2I_1(i1);
- t2 = LOAD2I_2(i2);
- t3 = LOAD2I_3(i3);
- t4 = ADD(t0, t1);
- t5 = SUB(t0, t1);
- t6 = ADD(t2, t3);
- t7 = SUB(t2, t3);
-
- t0 = ADD(t4, t6);
- t2 = SUB(t4, t6);
- t1 = HSP_SUB_MULI(&t5, &t7);
- t3 = HSP_ADD_MULI(&t5, &t7);
-
- float32x4x2_t tmp1 = vtrnq_f32(t0, t1);
- t0 = tmp1.val[0];
- t1 = tmp1.val[1];
- float32x4x2_t tmp2 = vtrnq_f32(t2, t3);
- t2 = tmp2.val[0];
- t3 = tmp2.val[1];
-
- r0_1 = t0;
- r2_3 = t2;
- r8_9 = t1;
- r10_11 = t3;
- __asm__ __volatile__ ("vswp d9,d10\n\t"
- "vst1.32 {d8,d9,d10,d11}, [%0, :128]!\n\t"
-// "vst1.32 {d8,d9}, [%0, :128]!\n\t"
-// "vst1.32 {d10,d11}, [%0, :128]!\n\t"
- : "+&r" (out1)
- : "w" (r8_9), "w" (r10_11)
- : "memory");
-
- }
- {
- V t0, t1, t2, t3, t4, t5, t6, t7;
- t0 = LOAD2I_4(i4);
- t1 = LOAD2I_5(i5);
- t2 = LOAD2I_6(i6);
- t3 = LOAD2I_7(i7);
- //t2 = HALFBLEND(t6, t7);
- //t3 = HALFBLEND(t7, t6);
- t4 = ADD(t0, t1);
- t5 = SUB(t0, t1);
- t6 = ADD(t2, t3);
- t7 = SUB(t2, t3);
- float32x4x2_t tmp1 = vtrnq_f32(t4, t5);
- r4_5 = tmp1.val[0];
- float32x4x2_t tmp2 = vtrnq_f32(t6, t7);
- r6_7 = tmp2.val[0];
- //t5 = MULI(t5);
- t0 = ADD(t6, t4);
- t2 = SUB(t6, t4);
- t1 = HSP_SUB_MULI(&t7, &t5);
- t3 = HSP_ADD_MULI(&t7, &t5);
-
- float32x4x2_t tmp3 = vtrnq_f32(t0, t1);
- r12_13 = tmp3.val[1];
- float32x4x2_t tmp4 = vtrnq_f32(t2, t3);
- r14_15 = tmp4.val[1];
-
-
- __asm__ __volatile__ ("vswp d13, d14\n\t"
- "vst1.32 {d12,d13,d14,d15}, [%0, :128]!\n\t"
-// "vst1.32 {d12,d13}, [%0, :128]!\n\t"
-// "vst1.32 {d14,d15}, [%0, :128]!\n\t"
- : "+&r" (out1)
- : "w" (r12_13), "w" (r14_15)
- : "memory");
-
-
- }
-
- K_N_HSP(&w,&r0_1,&r2_3,&r4_5,&r6_7);
-
- register V t0 __asm__ ("q0") = r0_1;
- register V t1 __asm__ ("q1") = r2_3;
- register V t2 __asm__ ("q2") = r4_5;
- register V t3 __asm__ ("q3") = r6_7;
-
- __asm__ __volatile__ ("vswp d1, d2\n\t"
- "vswp d5, d6\n\t"
- "vstmia %0!, {q0-q3}\n\t"
-// "vst1.32 {d0,d1}, [%0, :128]!\n\t"
-// "vst1.32 {d2,d3}, [%0, :128]!\n\t"
-// "vst1.32 {d4,d5}, [%0, :128]!\n\t"
-// "vst1.32 {d6,d7}, [%0, :128]\n\t"
- : "+&r" (out0)
- : "w" (t0), "w" (t1), "w" (t2), "w" (t3)
- : "memory");
-
-}
-static const __attribute__ ((aligned(16))) data_t oe_w_data[4] = {1.0f,0.70710678118654757273731092936941f, 0.0f,-0.70710678118654746171500846685376};
-
-__INLINE void neon_shl8_oe(data_t *restrict out0, data_t *restrict out1,const data_t **restrict i0,const data_t **restrict i1,const data_t **restrict i2,const data_t **restrict i3,const data_t **restrict i4,const data_t **restrict i5,const data_t **restrict i6,const data_t **restrict i7) {
- register V r0_1 __asm__ ("q0");
- register V r2_3 __asm__ ("q1");
- register V r4_5 __asm__ ("q2");
- register V r6_7 __asm__ ("q3");
-
- V r8_9, r10_11, r12_13, r14_15;
- const V w = vld1q_f32(oe_w_data);
-
- {
- V t0, t1, t2, t3, t4, t5, t6, t7;
- t0 = LOAD2I_0(i0);
- t1 = LOAD2I_1(i1);
- t6 = LOADI_2(i2);
- t7 = LOADI_3(i3);
-
- float32x2x2_t tmp0 = vtrn_f32(vget_low_f32(t6), vget_high_f32(t7));
- float32x2x2_t tmp1 = vtrn_f32(vget_low_f32(t7), vget_high_f32(t6));
- t2 = vcombine_f32(tmp0.val[0], tmp0.val[1]);
- t3 = vcombine_f32(tmp1.val[0], tmp1.val[1]);
-
- t4 = ADD(t0, t1);
- t5 = SUB(t0, t1);
- t6 = ADD(t2, t3);
- t7 = SUB(t2, t3);
- float32x4x2_t tmp2 = vtrnq_f32(t4, t5);
- r12_13 = tmp2.val[1];
- float32x4x2_t tmp3 = vtrnq_f32(t6, t7);
- r14_15 = tmp3.val[1];
-
- t0 = ADD(t4, t6);
- t2 = SUB(t4, t6);
- t1 = HSP_SUB_MULI(&t5, &t7);
- t3 = HSP_ADD_MULI(&t5, &t7);
- float32x4x2_t tmp4 = vtrnq_f32(t0, t1);
- r0_1 = tmp4.val[0];
- float32x4x2_t tmp5 = vtrnq_f32(t2, t3);
- r2_3 = tmp5.val[0];
- __asm__ __volatile__ ("vswp d1, d2\n\t"
- "vst1.32 {q0, q1}, [%0, :128]!\n\t"
-// "vst1.32 {q1}, [%0, :128]!\n\t"
- : "+&r" (out0)
- : "w" (r0_1), "w" (r2_3)
- : "memory");
- }
- {
- V t0, t1, t2, t3, t4, t5, t6, t7;
- t0 = LOAD2I_4(i4);
- t1 = LOAD2I_5(i5);
- t2 = LOAD2I_6(i6);
- t3 = LOAD2I_7(i7);
- t4 = ADD(t0, t1);
- t5 = SUB(t0, t1);
- t6 = ADD(t2, t3);
- t7 = SUB(t2, t3);
- t0 = ADD(t4, t6);
- t2 = SUB(t4, t6);
- t1 = HSP_SUB_MULI(&t5, &t7);
- t3 = HSP_ADD_MULI(&t5, &t7);
-
- float32x4x2_t tmp0 = vtrnq_f32(t0, t1);
- r4_5 = tmp0.val[0];
- r8_9 = tmp0.val[1];
- float32x4x2_t tmp1 = vtrnq_f32(t2, t3);
- r6_7 = tmp1.val[0];
- r10_11 = tmp1.val[1];
-
-
- __asm__ __volatile__ ("vswp d5, d6\n\t"
- "vst1.32 {q2, q3}, [%0, :128]!\n\t"
-// "vst1.32 {q3}, [%0, :128]!\n\t"
- : "+&r" (out0)
- : "w" (r4_5), "w" (r6_7)
- : "memory");
-
- }
-
- K_N_HSP(&w,&r8_9,&r10_11,&r12_13,&r14_15);
- register V t0 __asm__ ("q4") = r8_9;
- register V t1 __asm__ ("q5") = r10_11;
- register V t2 __asm__ ("q6") = r12_13;
- register V t3 __asm__ ("q7") = r14_15;
-
- __asm__ __volatile__ ("vswp d9, d10\n\t"
- "vswp d13, d14\n\t"
- "vstmia %0!, {q4-q7}\n\t"
-// "vst1.32 {q4}, [%0, :128]!\n\t"
-// "vst1.32 {q5}, [%0, :128]!\n\t"
-// "vst1.32 {q6}, [%0, :128]!\n\t"
-// "vst1.32 {q7}, [%0, :128]\n\t"
- : "+&r" (out1)
- : "w" (t0), "w" (t1), "w" (t2), "w" (t3)
- : "memory");
-
-
-}
-#endif
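
The deleted header's core twiddle step (VSMUL above) is the standard
split-format complex product, which the new assembly open-codes as
vmul/vadd/vsub runs. Condensed into a self-contained sketch:

    #include <arm_neon.h>

    /* .val[0] holds four real parts, .val[1] four imaginary parts. */
    typedef float32x4x2_t VS;

    /* (a + ib)(c + id) = (ac - bd) + i(ad + bc), four lanes at once;
       vmlsq/vmlaq fold the second product into the first, as the
       header's FMS/FMA wrappers did. */
    static inline VS cmul_split(VS d, VS w)
    {
        VS t;
        t.val[0] = vmlsq_f32(vmulq_f32(d.val[0], w.val[0]), d.val[1], w.val[1]);
        t.val[1] = vmlaq_f32(vmulq_f32(d.val[0], w.val[1]), d.val[1], w.val[0]);
        return t;
    }
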
diff --git a/lib/ffts/src/neon_static.s b/lib/ffts/src/neon_static.s
new file mode 100644
index 0000000..9121c4b
--- /dev/null
+++ b/lib/ffts/src/neon_static.s
@@ -0,0 +1,1255 @@
+/*
+
+This file is part of FFTS -- The Fastest Fourier Transform in the South
+
+Copyright (c) 2016, Jukka Ojanen <jukka.ojanen@kolumbus.fi>
+Copyright (c) 2012, Anthony M. Blake <amb@anthonix.com>
+Copyright (c) 2012, The University of Waikato
+
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+* Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+* Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in the
+documentation and/or other materials provided with the distribution.
+* Neither the name of the organization nor the
+names of its contributors may be used to endorse or promote products
+derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL ANTHONY M. BLAKE BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*/
+ .fpu neon
+
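+@ Each static codelet below is emitted from a single macro body; the
+@ `forward' argument selects between the vadd/vsub pairs that differ
+@ for the forward and inverse transforms.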
+.macro neon_static_e, forward=1
+ .align 4
+
+.if \forward
+#ifdef __APPLE__
+ .globl _neon_static_e_f
+_neon_static_e_f:
+#else
+ .globl neon_static_e_f
+neon_static_e_f:
+#endif
+.else
+#ifdef __APPLE__
+ .globl _neon_static_e_i
+_neon_static_e_i:
+#else
+ .globl neon_static_e_i
+neon_static_e_i:
+#endif
+.endif
+ push {r4-r12, lr}
+ vpush {q4-q7}
+
+ ldr lr, [r0, #40] @ p->N
+ ldr r12, [r0 ] @ p->offsets
+ ldr r3, [r0, #16] @ p->ee_ws
+
+ add r7, r1, lr
+ add r5, r1, lr, lsl #1
+ add r4, r1, lr, lsl #2
+ add r10, r7, lr, lsl #1
+ add r8, r7, lr, lsl #2
+
+ ldr r11, [r0, #28] @ p->i0
+
+ add r6, r4, lr, lsl #1
+ add r9, r10, lr, lsl #2
+
+ vld1.32 {d16, d17}, [r3, :128]
+1:
+ vld2.32 {q15}, [r10, :128]!
+ vld2.32 {q13}, [r8, :128]!
+ vld2.32 {q14}, [r7, :128]!
+ vld2.32 {q9}, [r4, :128]!
+ vld2.32 {q10}, [r1, :128]!
+ vld2.32 {q11}, [r6, :128]!
+ vld2.32 {q12}, [r5, :128]!
+ vsub.f32 q1, q14, q13
+ vld2.32 {q0}, [r9, :128]!
+ subs r11, r11, #1
+ vsub.f32 q2, q0, q15
+ vadd.f32 q0, q0, q15
+ vmul.f32 d10, d2, d17
+ vmul.f32 d11, d3, d16
+ vmul.f32 d12, d3, d17
+ vmul.f32 d6, d4, d17
+ vmul.f32 d7, d5, d16
+ vmul.f32 d8, d4, d16
+ vmul.f32 d9, d5, d17
+ vmul.f32 d13, d2, d16
+ vsub.f32 d7, d7, d6
+ vadd.f32 d11, d11, d10
+ vsub.f32 q1, q12, q11
+ vsub.f32 q2, q10, q9
+ vadd.f32 d6, d9, d8
+ vadd.f32 q4, q14, q13
+ vadd.f32 q11, q12, q11
+ vadd.f32 q12, q10, q9
+ vsub.f32 d10, d13, d12
+ vsub.f32 q7, q4, q0
+ vsub.f32 q9, q12, q11
+ vsub.f32 q13, q5, q3
+.if \forward
+ vsub.f32 d29, d5, d2
+.else
+ vadd.f32 d29, d5, d2
+.endif
+ vadd.f32 q5, q5, q3
+ vadd.f32 q10, q4, q0
+ vadd.f32 q11, q12, q11
+.if \forward
+ vadd.f32 d31, d5, d2
+ vadd.f32 d28, d4, d3
+ vsub.f32 d30, d4, d3
+ vsub.f32 d5, d19, d14
+ vsub.f32 d7, d31, d26
+.else
+ vsub.f32 d31, d5, d2
+ vsub.f32 d28, d4, d3
+ vadd.f32 d30, d4, d3
+ vadd.f32 d5, d19, d14
+ vadd.f32 d7, d31, d26
+.endif
+ vadd.f32 q1, q14, q5
+ vadd.f32 q0, q11, q10
+.if \forward
+ vadd.f32 d6, d30, d27
+ vadd.f32 d4, d18, d15
+ vadd.f32 d13, d19, d14
+ vsub.f32 d12, d18, d15
+ vadd.f32 d15, d31, d26
+.else
+ vsub.f32 d6, d30, d27
+ vsub.f32 d4, d18, d15
+ vsub.f32 d13, d19, d14
+ vadd.f32 d12, d18, d15
+ vsub.f32 d15, d31, d26
+.endif
+ ldr r3, [r12], #4
+ vtrn.32 q1, q3
+ ldr lr, [r12], #4
+ vtrn.32 q0, q2
+ add r3, r2, r3, lsl #2
+ vsub.f32 q4, q11, q10
+ add lr, r2, lr, lsl #2
+ vsub.f32 q5, q14, q5
+.if \forward
+ vsub.f32 d14, d30, d27
+.else
+ vadd.f32 d14, d30, d27
+.endif
+ vst2.32 {q0, q1}, [r3, :128]!
+ vst2.32 {q2, q3}, [lr, :128]!
+ vtrn.32 q4, q6
+ vtrn.32 q5, q7
+ vst2.32 {q4, q5}, [r3, :128]!
+ vst2.32 {q6, q7}, [lr, :128]!
+ bne 1b
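+ @ end of the p->i0 loop: eight vld2 input streams pass through two
+ @ twiddled butterfly stages and are stored, interleaved, at the two
+ @ destinations read from p->offsets via r12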
+
+ ldr r11, [r0, #12]
+ vld2.32 {q9}, [r5, :128]!
+ vld2.32 {q13}, [r1, :128]!
+ vld2.32 {q12}, [r4, :128]!
+ vld2.32 {q0}, [r7, :128]!
+ vsub.f32 q11, q13, q12
+ vld2.32 {q8}, [r6, :128]!
+ vadd.f32 q12, q13, q12
+ vsub.f32 q10, q9, q8
+ vadd.f32 q8, q9, q8
+ vadd.f32 q9, q12, q8
+.if \forward
+ vsub.f32 d9, d23, d20
+ vadd.f32 d11, d23, d20
+.else
+ vadd.f32 d9, d23, d20
+ vsub.f32 d11, d23, d20
+.endif
+ vsub.f32 q8, q12, q8
+.if \forward
+ vadd.f32 d8, d22, d21
+ vsub.f32 d10, d22, d21
+.else
+ vsub.f32 d8, d22, d21
+ vadd.f32 d10, d22, d21
+.endif
+ ldr r3, [r12], #4
+ vld1.32 {d20, d21}, [r11, :128]
+ ldr lr, [r12], #4
+ vtrn.32 q9, q4
+ add r3, r2, r3, lsl #2
+ vtrn.32 q8, q5
+ add lr, r2, lr, lsl #2
+ vswp d9, d10
+	vst1.32 {d8, d9, d10, d11}, [lr, :128]!
+ vld2.32 {q13}, [r10, :128]!
+ vld2.32 {q15}, [r9, :128]!
+ vld2.32 {q11}, [r8, :128]!
+ vsub.f32 q14, q15, q13
+ vsub.f32 q12, q0, q11
+ vadd.f32 q11, q0, q11
+ vadd.f32 q13, q15, q13
+.if \forward
+ vsub.f32 d13, d29, d24
+.else
+ vadd.f32 d13, d29, d24
+.endif
+ vadd.f32 q15, q13, q11
+.if \forward
+ vadd.f32 d12, d28, d25
+ vadd.f32 d15, d29, d24
+ vsub.f32 d14, d28, d25
+.else
+ vsub.f32 d12, d28, d25
+ vsub.f32 d15, d29, d24
+ vadd.f32 d14, d28, d25
+.endif
+ vtrn.32 q15, q6
+ vsub.f32 q15, q13, q11
+ vtrn.32 q15, q7
+ vswp d13, d14
+	vst1.32 {d12, d13, d14, d15}, [lr, :128]!
+ vtrn.32 q13, q14
+ vtrn.32 q11, q12
+ vmul.f32 d24, d26, d21
+ vmul.f32 d28, d27, d20
+ vmul.f32 d25, d26, d20
+ vmul.f32 d26, d27, d21
+ vmul.f32 d27, d22, d21
+ vmul.f32 d30, d23, d20
+ vmul.f32 d29, d23, d21
+ vmul.f32 d22, d22, d20
+ vsub.f32 d21, d28, d24
+ vadd.f32 d20, d26, d25
+ vadd.f32 d25, d30, d27
+ vsub.f32 d24, d22, d29
+ vadd.f32 q11, q12, q10
+ ldr r11, [r0, #32] @ p->i1
+ vsub.f32 q10, q12, q10
+ vadd.f32 q0, q9, q11
+ vsub.f32 q2, q9, q11
+.if \forward
+ vsub.f32 d3, d17, d20
+ vadd.f32 d7, d17, d20
+ vadd.f32 d2, d16, d21
+ vsub.f32 d6, d16, d21
+.else
+ vadd.f32 d3, d17, d20
+ vsub.f32 d7, d17, d20
+ vsub.f32 d2, d16, d21
+ vadd.f32 d6, d16, d21
+.endif
+ cmp r11, #0
+ vswp d1, d2
+ vswp d5, d6
+ vstmia r3!, {q0-q3}
+ beq 4f
+
+2:
+ vld2.32 {q8}, [r6, :128]!
+ vld2.32 {q9}, [r5, :128]!
+ vld2.32 {q10}, [r4, :128]!
+ vld2.32 {q13}, [r1, :128]!
+ vadd.f32 q11, q9, q8
+ vsub.f32 q8, q9, q8
+ vsub.f32 q9, q13, q10
+ vadd.f32 q12, q13, q10
+ subs r11, r11, #1
+ vld2.32 {q10}, [r9, :128]!
+ vld2.32 {q13}, [r7, :128]!
+ vsub.f32 q2, q12, q11
+.if \forward
+ vadd.f32 d7, d19, d16
+ vsub.f32 d3, d19, d16
+ vsub.f32 d6, d18, d17
+ vadd.f32 d2, d18, d17
+.else
+ vsub.f32 d7, d19, d16
+ vadd.f32 d3, d19, d16
+ vadd.f32 d6, d18, d17
+ vsub.f32 d2, d18, d17
+.endif
+ vld2.32 {q9}, [r10, :128]!
+ vld2.32 {q8}, [r8, :128]!
+ vadd.f32 q0, q12, q11
+ vadd.f32 q11, q13, q8
+ vadd.f32 q12, q10, q9
+ vsub.f32 q8, q13, q8
+ vsub.f32 q9, q10, q9
+ vsub.f32 q6, q12, q11
+ vadd.f32 q4, q12, q11
+ vtrn.32 q0, q2
+ ldr r3, [r12], #4
+.if \forward
+ vadd.f32 d15, d19, d16
+.else
+ vsub.f32 d15, d19, d16
+.endif
+ ldr lr, [r12], #4
+.if \forward
+ vsub.f32 d11, d19, d16
+ vsub.f32 d14, d18, d17
+ vadd.f32 d10, d18, d17
+.else
+ vadd.f32 d11, d19, d16
+ vadd.f32 d14, d18, d17
+ vsub.f32 d10, d18, d17
+.endif
+ add r3, r2, r3, lsl #2
+ vtrn.32 q1, q3
+ add lr, r2, lr, lsl #2
+	vst2.32 {q0, q1}, [r3, :128]!
+	vst2.32 {q2, q3}, [lr, :128]!
+ vtrn.32 q4, q6
+ vtrn.32 q5, q7
+ vst2.32 {q4, q5}, [r3, :128]!
+ vst2.32 {q6, q7}, [lr, :128]!
+ bne 2b
+
+ ldr r3, [r0, #16] @ p->ee_ws
+ ldr r11, [r0, #32] @ p->i1
+ vld1.32 {d16, d17}, [r3, :128]
+3:
+ vld2.32 {q15}, [r5, :128]!
+ vld2.32 {q13}, [r4, :128]!
+ vld2.32 {q14}, [r1, :128]!
+ vld2.32 {q9}, [r10, :128]!
+ vld2.32 {q10}, [r9, :128]!
+ vld2.32 {q11}, [r8, :128]!
+ vld2.32 {q12}, [r7, :128]!
+ vsub.f32 q1, q14, q13
+ vld2.32 {q0}, [r6, :128]!
+ subs r11, r11, #1
+ vsub.f32 q2, q0, q15
+ vadd.f32 q0, q0, q15
+ vmul.f32 d10, d2, d17
+ vmul.f32 d11, d3, d16
+ vmul.f32 d12, d3, d17
+ vmul.f32 d6, d4, d17
+ vmul.f32 d7, d5, d16
+ vmul.f32 d8, d4, d16
+ vmul.f32 d9, d5, d17
+ vmul.f32 d13, d2, d16
+ vsub.f32 d7, d7, d6
+ vadd.f32 d11, d11, d10
+ vsub.f32 q1, q12, q11
+ vsub.f32 q2, q10, q9
+ vadd.f32 d6, d9, d8
+ vadd.f32 q4, q14, q13
+ vadd.f32 q11, q12, q11
+ vadd.f32 q12, q10, q9
+ vsub.f32 d10, d13, d12
+ vsub.f32 q7, q4, q0
+ vsub.f32 q9, q12, q11
+ vsub.f32 q13, q5, q3
+.if \forward
+ vsub.f32 d29, d5, d2
+.else
+ vadd.f32 d29, d5, d2
+.endif
+ vadd.f32 q5, q5, q3
+ vadd.f32 q10, q4, q0
+ vadd.f32 q11, q12, q11
+.if \forward
+ vadd.f32 d31, d5, d2
+ vadd.f32 d28, d4, d3
+ vsub.f32 d30, d4, d3
+ vsub.f32 d5, d19, d14
+ vsub.f32 d7, d31, d26
+.else
+ vsub.f32 d31, d5, d2
+ vsub.f32 d28, d4, d3
+ vadd.f32 d30, d4, d3
+ vadd.f32 d5, d19, d14
+ vadd.f32 d7, d31, d26
+.endif
+ vadd.f32 q1, q14, q5
+ vadd.f32 q0, q11, q10
+.if \forward
+ vadd.f32 d6, d30, d27
+ vadd.f32 d4, d18, d15
+ vadd.f32 d13, d19, d14
+ vsub.f32 d12, d18, d15
+ vadd.f32 d15, d31, d26
+.else
+ vsub.f32 d6, d30, d27
+ vsub.f32 d4, d18, d15
+ vsub.f32 d13, d19, d14
+ vadd.f32 d12, d18, d15
+ vsub.f32 d15, d31, d26
+.endif
+ ldr r3, [r12], #4
+ vtrn.32 q1, q3
+ ldr lr, [r12], #4
+ vtrn.32 q0, q2
+ add r3, r2, r3, lsl #2
+ vsub.f32 q4, q11, q10
+ add lr, r2, lr, lsl #2
+ vsub.f32 q5, q14, q5
+.if \forward
+ vsub.f32 d14, d30, d27
+.else
+ vadd.f32 d14, d30, d27
+.endif
+ vst2.32 {q0, q1}, [r3, :128]!
+ vst2.32 {q2, q3}, [lr, :128]!
+ vtrn.32 q4, q6
+ vtrn.32 q5, q7
+ vst2.32 {q4, q5}, [r3, :128]!
+ vst2.32 {q6, q7}, [lr, :128]!
+ bne 3b
+
+4:
+ vpop {q4-q7}
+ pop {r4-r12, pc}
+.endm
+
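+ @ neon_static_o: assembles to neon_static_o_f (forward) or neon_static_o_i
+ @ (inverse). Arguments as for neon_static_e: r0 = plan, r1 = input,
+ @ r2 = output base indexed through the p->offsets table.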
+.macro neon_static_o, forward=1
+ .align 4
+
+.if \forward
+#ifdef __APPLE__
+ .globl _neon_static_o_f
+_neon_static_o_f:
+#else
+ .globl neon_static_o_f
+neon_static_o_f:
+#endif
+.else
+#ifdef __APPLE__
+ .globl _neon_static_o_i
+_neon_static_o_i:
+#else
+ .globl neon_static_o_i
+neon_static_o_i:
+#endif
+.endif
+ push {r4-r12, lr}
+ vpush {q4-q7}
+
+ ldr lr, [r0, #40] @ p->N
+ ldr r12, [r0] @ p->offsets
+ ldr r3, [r0, #16] @ p->ee_ws
+
+ add r7, r1, lr
+ add r5, r1, lr, lsl #1
+ add r4, r1, lr, lsl #2
+ add r10, r7, lr, lsl #1
+ add r8, r7, lr, lsl #2
+
+ ldr r11, [r0, #28] @ p->i0
+
+ add r6, r4, lr, lsl #1
+ add r9, r10, lr, lsl #2
+
+ vld1.32 {d16, d17}, [r3, :128]
+1:
+ vld2.32 {q15}, [r10, :128]!
+ vld2.32 {q13}, [r8, :128]!
+ vld2.32 {q14}, [r7, :128]!
+ vld2.32 {q9}, [r4, :128]!
+ vld2.32 {q10}, [r1, :128]!
+ vld2.32 {q11}, [r6, :128]!
+ vld2.32 {q12}, [r5, :128]!
+ vsub.f32 q1, q14, q13
+ vld2.32 {q0}, [r9, :128]!
+ subs r11, r11, #1
+ vsub.f32 q2, q0, q15
+ vadd.f32 q0, q0, q15
+ vmul.f32 d10, d2, d17
+ vmul.f32 d11, d3, d16
+ vmul.f32 d12, d3, d17
+ vmul.f32 d6, d4, d17
+ vmul.f32 d7, d5, d16
+ vmul.f32 d8, d4, d16
+ vmul.f32 d9, d5, d17
+ vmul.f32 d13, d2, d16
+ vsub.f32 d7, d7, d6
+ vadd.f32 d11, d11, d10
+ vsub.f32 q1, q12, q11
+ vsub.f32 q2, q10, q9
+ vadd.f32 d6, d9, d8
+ vadd.f32 q4, q14, q13
+ vadd.f32 q11, q12, q11
+ vadd.f32 q12, q10, q9
+ vsub.f32 d10, d13, d12
+ vsub.f32 q7, q4, q0
+ vsub.f32 q9, q12, q11
+ vsub.f32 q13, q5, q3
+.if \forward
+ vsub.f32 d29, d5, d2
+.else
+ vadd.f32 d29, d5, d2
+.endif
+ vadd.f32 q5, q5, q3
+ vadd.f32 q10, q4, q0
+ vadd.f32 q11, q12, q11
+.if \forward
+ vadd.f32 d31, d5, d2
+ vadd.f32 d28, d4, d3
+ vsub.f32 d30, d4, d3
+ vsub.f32 d5, d19, d14
+ vsub.f32 d7, d31, d26
+.else
+ vsub.f32 d31, d5, d2
+ vsub.f32 d28, d4, d3
+ vadd.f32 d30, d4, d3
+ vadd.f32 d5, d19, d14
+ vadd.f32 d7, d31, d26
+.endif
+ vadd.f32 q1, q14, q5
+ vadd.f32 q0, q11, q10
+.if \forward
+ vadd.f32 d6, d30, d27
+ vadd.f32 d4, d18, d15
+ vadd.f32 d13, d19, d14
+ vsub.f32 d12, d18, d15
+ vadd.f32 d15, d31, d26
+.else
+ vsub.f32 d6, d30, d27
+ vsub.f32 d4, d18, d15
+ vsub.f32 d13, d19, d14
+ vadd.f32 d12, d18, d15
+ vsub.f32 d15, d31, d26
+.endif
+ ldr r3, [r12], #4
+ vtrn.32 q1, q3
+ ldr lr, [r12], #4
+ vtrn.32 q0, q2
+ add r3, r2, r3, lsl #2
+ vsub.f32 q4, q11, q10
+ add lr, r2, lr, lsl #2
+ vsub.f32 q5, q14, q5
+.if \forward
+ vsub.f32 d14, d30, d27
+.else
+ vadd.f32 d14, d30, d27
+.endif
+ vst2.32 {q0, q1}, [r3, :128]!
+ vst2.32 {q2, q3}, [lr, :128]!
+ vtrn.32 q4, q6
+ vtrn.32 q5, q7
+ vst2.32 {q4, q5}, [r3, :128]!
+ vst2.32 {q6, q7}, [lr, :128]!
+ bne 1b
+
+ ldr r11, [r0, #32] @ p->i1
+ cmp r11, #0
+ beq 3f
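+ @ Loop 2: "oo" pass (old _neon_oo_o_loop), entered only when p->i1 is
+ @ nonzero.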
+2:
+ vld2.32 {q8}, [r6, :128]!
+ vld2.32 {q9}, [r5, :128]!
+ vld2.32 {q10}, [r4, :128]!
+ vld2.32 {q13}, [r1, :128]!
+ vadd.f32 q11, q9, q8
+ vsub.f32 q8, q9, q8
+ vsub.f32 q9, q13, q10
+ vadd.f32 q12, q13, q10
+ subs r11, r11, #1
+ vld2.32 {q10}, [r9, :128]!
+ vld2.32 {q13}, [r7, :128]!
+ vsub.f32 q2, q12, q11
+.if \forward
+ vadd.f32 d7, d19, d16
+ vsub.f32 d3, d19, d16
+ vsub.f32 d6, d18, d17
+ vadd.f32 d2, d18, d17
+.else
+ vsub.f32 d7, d19, d16
+ vadd.f32 d3, d19, d16
+ vadd.f32 d6, d18, d17
+ vsub.f32 d2, d18, d17
+.endif
+ vld2.32 {q9}, [r10, :128]!
+ vld2.32 {q8}, [r8, :128]!
+ vadd.f32 q0, q12, q11
+ vadd.f32 q11, q13, q8
+ vadd.f32 q12, q10, q9
+ vsub.f32 q8, q13, q8
+ vsub.f32 q9, q10, q9
+ vsub.f32 q6, q12, q11
+ vadd.f32 q4, q12, q11
+
+ vtrn.32 q0, q2
+ ldr r3, [r12], #4
+.if \forward
+ vadd.f32 d15, d19, d16
+.else
+ vsub.f32 d15, d19, d16
+.endif
+ ldr lr, [r12], #4
+.if \forward
+ vsub.f32 d11, d19, d16
+ vsub.f32 d14, d18, d17
+ vadd.f32 d10, d18, d17
+.else
+ vadd.f32 d11, d19, d16
+ vadd.f32 d14, d18, d17
+ vsub.f32 d10, d18, d17
+.endif
+ add r3, r2, r3, lsl #2
+ vtrn.32 q1, q3
+ add lr, r2, lr, lsl #2
+ vst2.32 {q0, q1}, [r3, :128]!
+ vst2.32 {q2, q3}, [lr, :128]!
+ vtrn.32 q4, q6
+ vtrn.32 q5, q7
+ vst2.32 {q4, q5}, [r3, :128]!
+ vst2.32 {q6, q7}, [lr, :128]!
+ bne 2b
+
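+ @ Block 3: straight-line leftover block (the code between
+ @ _neon_oo_o_loop_exit and the vstmia in the old sources); r11 is reloaded
+ @ from offset #8 of the plan and used as a pointer to one 128-bit constant
+ @ vector below.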
+3:
+ ldr r11, [r0, #8]
+ vld1.32 {q8}, [r5, :128]!
+ vld1.32 {q10}, [r6, :128]!
+ vld2.32 {q11}, [r4, :128]!
+ vld2.32 {q13}, [r1, :128]!
+ vld2.32 {q15}, [r8, :128]!
+ vorr d25, d17, d17
+ vorr d24, d20, d20
+ vorr d20, d16, d16
+ vsub.f32 q9, q13, q11
+ vadd.f32 q11, q13, q11
+ ldr r3, [r12], #4
+ vtrn.32 d24, d25
+ ldr lr, [r12], #4
+ vtrn.32 d20, d21
+ add r3, r2, r3, lsl #2
+ vsub.f32 q8, q10, q12
+ add lr, r2, lr, lsl #2
+ vadd.f32 q10, q10, q12
+ vadd.f32 q0, q11, q10
+.if \forward
+ vsub.f32 d25, d19, d16
+ vadd.f32 d27, d19, d16
+.else
+ vadd.f32 d25, d19, d16
+ vsub.f32 d27, d19, d16
+.endif
+ vsub.f32 q1, q11, q10
+.if \forward
+ vadd.f32 d24, d18, d17
+ vsub.f32 d26, d18, d17
+.else
+ vsub.f32 d24, d18, d17
+ vadd.f32 d26, d18, d17
+.endif
+ vtrn.32 q0, q12
+ vtrn.32 q1, q13
+ vld1.32 {d24, d25}, [r11, :128]
+ vswp d1, d2
+ vst1.32 {q0, q1}, [r3, :128]!
+ vld2.32 {q0}, [r7, :128]!
+ vadd.f32 q1, q0, q15
+ vld2.32 {q13}, [r10, :128]!
+ vld2.32 {q14}, [r9, :128]!
+ vsub.f32 q15, q0, q15
+ vsub.f32 q0, q14, q13
+ vadd.f32 q3, q14, q13
+ vadd.f32 q2, q3, q1
+.if \forward
+ vsub.f32 d29, d1, d30
+ vadd.f32 d27, d1, d30
+.else
+ vadd.f32 d29, d1, d30
+ vsub.f32 d27, d1, d30
+.endif
+ vsub.f32 q3, q3, q1
+.if \forward
+ vadd.f32 d28, d0, d31
+ vsub.f32 d26, d0, d31
+.else
+ vsub.f32 d28, d0, d31
+ vadd.f32 d26, d0, d31
+.endif
+ vtrn.32 q2, q14
+ vtrn.32 q3, q13
+ vswp d5, d6
+ vst1.32 {q2, q3}, [r3, :128]!
+ vtrn.32 q11, q9
+ vtrn.32 q10, q8
+ vmul.f32 d20, d18, d25
+ vmul.f32 d22, d19, d24
+ vmul.f32 d21, d19, d25
+ vmul.f32 d18, d18, d24
+ vmul.f32 d19, d16, d25
+ vmul.f32 d30, d17, d24
+ vmul.f32 d23, d16, d24
+ vmul.f32 d24, d17, d25
+ vadd.f32 d17, d22, d20
+ vsub.f32 d16, d18, d21
+ ldr r3, [r0, #16] @ p->ee_ws
+ vsub.f32 d21, d30, d19
+ ldr r11, [r0, #32] @ p->i1
+ vadd.f32 d20, d24, d23
+ vadd.f32 q9, q8, q10
+ vsub.f32 q8, q8, q10
+ vadd.f32 q4, q14, q9
+ vsub.f32 q6, q14, q9
+.if \forward
+ vsub.f32 d11, d27, d16
+ vadd.f32 d15, d27, d16
+ vadd.f32 d10, d26, d17
+ vsub.f32 d14, d26, d17
+.else
+ vadd.f32 d11, d27, d16
+ vsub.f32 d15, d27, d16
+ vsub.f32 d10, d26, d17
+ vadd.f32 d14, d26, d17
+.endif
+ cmp r11, #0
+ vswp d9, d10
+ vswp d13, d14
+ vstmia lr!, {q4-q7}
+ beq 5f
+
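+ @ Loop 4: final "ee" pass (old _neon_ee_o_loop2) over p->i1 blocks.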
+ vld1.32 {d16, d17}, [r3, :128]
+4:
+ vld2.32 {q15}, [r5, :128]!
+ vld2.32 {q13}, [r4, :128]!
+ vld2.32 {q14}, [r1, :128]!
+ vld2.32 {q9}, [r10, :128]!
+ vld2.32 {q10}, [r9, :128]!
+ vld2.32 {q11}, [r8, :128]!
+ vld2.32 {q12}, [r7, :128]!
+ vsub.f32 q1, q14, q13
+ vld2.32 {q0}, [r6, :128]!
+ subs r11, r11, #1
+ vsub.f32 q2, q0, q15
+ vadd.f32 q0, q0, q15
+ vmul.f32 d10, d2, d17
+ vmul.f32 d11, d3, d16
+ vmul.f32 d12, d3, d17
+ vmul.f32 d6, d4, d17
+ vmul.f32 d7, d5, d16
+ vmul.f32 d8, d4, d16
+ vmul.f32 d9, d5, d17
+ vmul.f32 d13, d2, d16
+ vsub.f32 d7, d7, d6
+ vadd.f32 d11, d11, d10
+ vsub.f32 q1, q12, q11
+ vsub.f32 q2, q10, q9
+ vadd.f32 d6, d9, d8
+ vadd.f32 q4, q14, q13
+ vadd.f32 q11, q12, q11
+ vadd.f32 q12, q10, q9
+ vsub.f32 d10, d13, d12
+ vsub.f32 q7, q4, q0
+ vsub.f32 q9, q12, q11
+ vsub.f32 q13, q5, q3
+.if \forward
+ vsub.f32 d29, d5, d2
+.else
+ vadd.f32 d29, d5, d2
+.endif
+ vadd.f32 q5, q5, q3
+ vadd.f32 q10, q4, q0
+ vadd.f32 q11, q12, q11
+.if \forward
+ vadd.f32 d31, d5, d2
+ vadd.f32 d28, d4, d3
+ vsub.f32 d30, d4, d3
+ vsub.f32 d5, d19, d14
+ vsub.f32 d7, d31, d26
+.else
+ vsub.f32 d31, d5, d2
+ vsub.f32 d28, d4, d3
+ vadd.f32 d30, d4, d3
+ vadd.f32 d5, d19, d14
+ vadd.f32 d7, d31, d26
+.endif
+ vadd.f32 q1, q14, q5
+ vadd.f32 q0, q11, q10
+.if \forward
+ vadd.f32 d6, d30, d27
+ vadd.f32 d4, d18, d15
+ vadd.f32 d13, d19, d14
+ vsub.f32 d12, d18, d15
+ vadd.f32 d15, d31, d26
+.else
+ vsub.f32 d6, d30, d27
+ vsub.f32 d4, d18, d15
+ vsub.f32 d13, d19, d14
+ vadd.f32 d12, d18, d15
+ vsub.f32 d15, d31, d26
+.endif
+ ldr r3, [r12], #4
+ vtrn.32 q1, q3
+ ldr lr, [r12], #4
+ vtrn.32 q0, q2
+ add r3, r2, r3, lsl #2
+ vsub.f32 q4, q11, q10
+ add lr, r2, lr, lsl #2
+ vsub.f32 q5, q14, q5
+.if \forward
+ vsub.f32 d14, d30, d27
+.else
+ vadd.f32 d14, d30, d27
+.endif
+ vst2.32 {q0, q1}, [r3, :128]!
+ vst2.32 {q2, q3}, [lr, :128]!
+ vtrn.32 q4, q6
+ vtrn.32 q5, q7
+ vst2.32 {q4, q5}, [r3, :128]!
+ vst2.32 {q6, q7}, [lr, :128]!
+ bne 4b
+
+5:
+ vpop {q4-q7}
+ pop {r4-r12, pc}
+.endm
+
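+ @ neon_static_x4: a single size-4 step applied in place to the 128 bytes of
+ @ complex data at r0 (loaded into q8-q15); r1 points at the twiddle vectors.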
+.macro neon_static_x4, forward=1
+ .align 4
+
+.if \forward
+#ifdef __APPLE__
+ .globl _neon_static_x4_f
+_neon_static_x4_f:
+#else
+ .globl neon_static_x4_f
+neon_static_x4_f:
+#endif
+.else
+#ifdef __APPLE__
+ .globl _neon_static_x4_i
+_neon_static_x4_i:
+#else
+ .globl neon_static_x4_i
+neon_static_x4_i:
+#endif
+.endif
+ add r3, r0, #64
+ vpush {q4-q7}
+
+ vld1.32 {q2, q3}, [r1, :128]
+ vld1.32 {q12, q13}, [r3, :128]!
+ mov r2, r0
+ vmul.f32 q0, q13, q3
+ vld1.32 {q14, q15}, [r3, :128]
+ vmul.f32 q5, q12, q2
+ vld1.32 {q8, q9}, [r0, :128]!
+ vmul.f32 q1, q14, q2
+ vld1.32 {q10, q11}, [r0, :128]
+ vmul.f32 q4, q14, q3
+ vmul.f32 q14, q12, q3
+ vmul.f32 q13, q13, q2
+ vmul.f32 q12, q15, q3
+ vmul.f32 q2, q15, q2
+ vsub.f32 q0, q5, q0
+ vadd.f32 q13, q13, q14
+ vadd.f32 q12, q12, q1
+ vsub.f32 q1, q2, q4
+ vadd.f32 q15, q0, q12
+ vsub.f32 q12, q0, q12
+ vadd.f32 q14, q13, q1
+ vsub.f32 q13, q13, q1
+ vadd.f32 q0, q8, q15
+ vadd.f32 q1, q9, q14
+.if \forward
+ vadd.f32 q2, q10, q13
+.else
+ vsub.f32 q2, q10, q13
+.endif
+ vsub.f32 q4, q8, q15
+.if \forward
+ vsub.f32 q3, q11, q12
+.else
+ vadd.f32 q3, q11, q12
+.endif
+ vst1.32 {q0, q1}, [r2, :128]!
+ vsub.f32 q5, q9, q14
+.if \forward
+ vsub.f32 q6, q10, q13
+ vadd.f32 q7, q11, q12
+.else
+ vadd.f32 q6, q10, q13
+ vsub.f32 q7, q11, q12
+.endif
+ vst1.32 {q2, q3}, [r2, :128]!
+ vst1.32 {q4, q5}, [r2, :128]!
+ vst1.32 {q6, q7}, [r2, :128]
+
+ vpop {q4-q7}
+ bx lr
+.endm
+
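+ @ neon_static_x8: size-8 step. r0 = data (rows data0..data7 spaced r1 bytes
+ @ apart), r2 = twiddle LUT; r1 also serves as the byte counter, decremented
+ @ by 32 per iteration.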
+.macro neon_static_x8, forward=1
+ .align 4
+
+.if \forward
+#ifdef __APPLE__
+ .globl _neon_static_x8_f
+_neon_static_x8_f:
+#else
+ .globl neon_static_x8_f
+neon_static_x8_f:
+#endif
+.else
+#ifdef __APPLE__
+ .globl _neon_static_x8_i
+_neon_static_x8_i:
+#else
+ .globl neon_static_x8_i
+neon_static_x8_i:
+#endif
+.endif
+ push {r4-r8, lr}
+ vpush {q4-q7}
+
+ add r4, r0, r1, lsl #1 @ data2
+ add r3, r0, r1 @ data1
+ add r6, r4, r1, lsl #1 @ data4
+ add r5, r4, r1 @ data3
+ add r8, r6, r1, lsl #1 @ data6
+ add r7, r6, r1 @ data5
+ add r12, r8, r1 @ data7
+
+1:
+ vld1.32 {q2, q3}, [r2, :128]!
+ subs r1, r1, #32
+ vld1.32 {q14, q15}, [r5, :128]
+ vmul.f32 q12, q15, q2
+ vld1.32 {q10, q11}, [r4, :128]
+ vmul.f32 q8, q14, q3
+ vmul.f32 q13, q14, q2
+ vmul.f32 q9, q10, q3
+ vmul.f32 q1, q10, q2
+ vmul.f32 q0, q11, q2
+ vmul.f32 q14, q11, q3
+ vmul.f32 q15, q15, q3
+ vsub.f32 q10, q12, q8
+ vld1.32 {q2, q3}, [r2, :128]!
+ vadd.f32 q11, q0, q9
+ vadd.f32 q8, q15, q13
+ vsub.f32 q9, q1, q14
+ vld1.32 {q12, q13}, [r3, :128]
+ vsub.f32 q15, q11, q10
+ vsub.f32 q14, q9, q8
+.if \forward
+ vadd.f32 q4, q12, q15
+ vsub.f32 q6, q12, q15
+ vsub.f32 q5, q13, q14
+ vadd.f32 q7, q13, q14
+.else
+ vsub.f32 q4, q12, q15
+ vadd.f32 q6, q12, q15
+ vadd.f32 q5, q13, q14
+ vsub.f32 q7, q13, q14
+.endif
+ vld1.32 {q14, q15}, [r8, :128]
+ vld1.32 {q12, q13}, [r6, :128]
+ vmul.f32 q1, q14, q2
+ vmul.f32 q0, q14, q3
+ vst1.32 {q4, q5}, [r3, :128]
+ vmul.f32 q14, q15, q3
+ vmul.f32 q4, q15, q2
+ vadd.f32 q15, q9, q8
+ vst1.32 {q6, q7}, [r5, :128]
+ vmul.f32 q8, q12, q3
+ vmul.f32 q5, q13, q3
+ vmul.f32 q12, q12, q2
+ vmul.f32 q9, q13, q2
+ vadd.f32 q14, q14, q1
+ vsub.f32 q13, q4, q0
+ vadd.f32 q0, q9, q8
+ vld1.32 {q8, q9}, [r0, :128]
+ vadd.f32 q1, q11, q10
+ vsub.f32 q12, q12, q5
+ vadd.f32 q11, q8, q15
+ vsub.f32 q8, q8, q15
+ vadd.f32 q2, q12, q14
+ vsub.f32 q10, q0, q13
+ vadd.f32 q15, q0, q13
+ vadd.f32 q13, q9, q1
+ vsub.f32 q9, q9, q1
+ vsub.f32 q12, q12, q14
+ vadd.f32 q0, q11, q2
+ vadd.f32 q1, q13, q15
+ vsub.f32 q4, q11, q2
+.if \forward
+ vadd.f32 q2, q8, q10
+ vsub.f32 q3, q9, q12
+.else
+ vsub.f32 q2, q8, q10
+ vadd.f32 q3, q9, q12
+.endif
+ vst1.32 {q0, q1}, [r0, :128]!
+ vsub.f32 q5, q13, q15
+ vld1.32 {q14, q15}, [r12, :128]
+.if \forward
+ vadd.f32 q7, q9, q12
+.else
+ vsub.f32 q7, q9, q12
+.endif
+ vld1.32 {q12, q13}, [r7, :128]
+ vst1.32 {q2, q3}, [r4, :128]!
+ vld1.32 {q2, q3}, [r2, :128]!
+.if \forward
+ vsub.f32 q6, q8, q10
+.else
+ vadd.f32 q6, q8, q10
+.endif
+ vmul.f32 q8, q14, q2
+ vst1.32 {q4, q5}, [r6, :128]!
+ vmul.f32 q10, q15, q3
+ vmul.f32 q9, q13, q3
+ vmul.f32 q11, q12, q2
+ vmul.f32 q14, q14, q3
+ vst1.32 {q6, q7}, [r8, :128]!
+ vmul.f32 q15, q15, q2
+ vmul.f32 q12, q12, q3
+ vmul.f32 q13, q13, q2
+ vadd.f32 q10, q10, q8
+ vsub.f32 q11, q11, q9
+ vld1.32 {q8, q9}, [r3, :128]
+ vsub.f32 q14, q15, q14
+ vadd.f32 q15, q13, q12
+ vadd.f32 q13, q11, q10
+ vadd.f32 q12, q15, q14
+ vsub.f32 q15, q15, q14
+ vsub.f32 q14, q11, q10
+ vld1.32 {q10, q11}, [r5, :128]
+ vadd.f32 q0, q8, q13
+ vadd.f32 q1, q9, q12
+.if \forward
+ vadd.f32 q2, q10, q15
+ vsub.f32 q3, q11, q14
+.else
+ vsub.f32 q2, q10, q15
+ vadd.f32 q3, q11, q14
+.endif
+ vsub.f32 q4, q8, q13
+ vst1.32 {q0, q1}, [r3, :128]!
+ vsub.f32 q5, q9, q12
+.if \forward
+ vsub.f32 q6, q10, q15
+.else
+ vadd.f32 q6, q10, q15
+.endif
+ vst1.32 {q2, q3}, [r5, :128]!
+.if \forward
+ vadd.f32 q7, q11, q14
+.else
+ vsub.f32 q7, q11, q14
+.endif
+ vst1.32 {q4, q5}, [r7, :128]!
+ vst1.32 {q6, q7}, [r12, :128]!
+ bne 1b
+
+ vpop {q4-q7}
+ pop {r4-r8, pc}
+.endm
+
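+ @ neon_static_x8_t: same as neon_static_x8 except the results are written
+ @ back with vst2.32 (element-interleaved) rather than vst1.32.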
+.macro neon_static_x8_t, forward=1
+ .align 4
+
+.if \forward
+#ifdef __APPLE__
+ .globl _neon_static_x8_t_f
+_neon_static_x8_t_f:
+#else
+ .globl neon_static_x8_t_f
+neon_static_x8_t_f:
+#endif
+.else
+#ifdef __APPLE__
+ .globl _neon_static_x8_t_i
+_neon_static_x8_t_i:
+#else
+ .globl neon_static_x8_t_i
+neon_static_x8_t_i:
+#endif
+.endif
+ push {r4-r8, lr}
+ vpush {q4-q7}
+
+ add r4, r0, r1, lsl #1 @ data2
+ add r3, r0, r1 @ data1
+ add r6, r4, r1, lsl #1 @ data4
+ add r5, r4, r1 @ data3
+ add r8, r6, r1, lsl #1 @ data6
+ add r7, r6, r1 @ data5
+ add r12, r8, r1 @ data7
+
+1:
+ vld1.32 {q2, q3}, [r2, :128]!
+ subs r1, r1, #32
+ vld1.32 {q14, q15}, [r5, :128]
+ vmul.f32 q12, q15, q2
+ vld1.32 {q10, q11}, [r4, :128]
+ vmul.f32 q8, q14, q3
+ vmul.f32 q13, q14, q2
+ vmul.f32 q9, q10, q3
+ vmul.f32 q1, q10, q2
+ vmul.f32 q0, q11, q2
+ vmul.f32 q14, q11, q3
+ vmul.f32 q15, q15, q3
+ vsub.f32 q10, q12, q8
+ vld1.32 {q2, q3}, [r2, :128]!
+ vadd.f32 q11, q0, q9
+ vadd.f32 q8, q15, q13
+ vsub.f32 q9, q1, q14
+ vld1.32 {q12, q13}, [r3, :128]
+ vsub.f32 q15, q11, q10
+ vsub.f32 q14, q9, q8
+.if \forward
+ vadd.f32 q4, q12, q15
+ vsub.f32 q6, q12, q15
+ vsub.f32 q5, q13, q14
+ vadd.f32 q7, q13, q14
+.else
+ vsub.f32 q4, q12, q15
+ vadd.f32 q6, q12, q15
+ vadd.f32 q5, q13, q14
+ vsub.f32 q7, q13, q14
+.endif
+ vld1.32 {q14, q15}, [r8, :128]
+ vld1.32 {q12, q13}, [r6, :128]
+ vmul.f32 q1, q14, q2
+ vmul.f32 q0, q14, q3
+ vst1.32 {q4, q5}, [r3, :128]
+ vmul.f32 q14, q15, q3
+ vmul.f32 q4, q15, q2
+ vadd.f32 q15, q9, q8
+ vst1.32 {q6, q7}, [r5, :128]
+ vmul.f32 q8, q12, q3
+ vmul.f32 q5, q13, q3
+ vmul.f32 q12, q12, q2
+ vmul.f32 q9, q13, q2
+ vadd.f32 q14, q14, q1
+ vsub.f32 q13, q4, q0
+ vadd.f32 q0, q9, q8
+ vld1.32 {q8, q9}, [r0, :128]
+ vadd.f32 q1, q11, q10
+ vsub.f32 q12, q12, q5
+ vadd.f32 q11, q8, q15
+ vsub.f32 q8, q8, q15
+ vadd.f32 q2, q12, q14
+ vsub.f32 q10, q0, q13
+ vadd.f32 q15, q0, q13
+ vadd.f32 q13, q9, q1
+ vsub.f32 q9, q9, q1
+ vsub.f32 q12, q12, q14
+ vadd.f32 q0, q11, q2
+ vadd.f32 q1, q13, q15
+ vsub.f32 q4, q11, q2
+.if \forward
+ vadd.f32 q2, q8, q10
+ vsub.f32 q3, q9, q12
+.else
+ vsub.f32 q2, q8, q10
+ vadd.f32 q3, q9, q12
+.endif
+ vst2.32 {q0, q1}, [r0, :128]!
+ vsub.f32 q5, q13, q15
+ vld1.32 {q14, q15}, [r12, :128]
+.if \forward
+ vadd.f32 q7, q9, q12
+.else
+ vsub.f32 q7, q9, q12
+.endif
+ vld1.32 {q12, q13}, [r7, :128]
+ vst2.32 {q2, q3}, [r4, :128]!
+ vld1.32 {q2, q3}, [r2, :128]!
+.if \forward
+ vsub.f32 q6, q8, q10
+.else
+ vadd.f32 q6, q8, q10
+.endif
+ vmul.f32 q8, q14, q2
+ vst2.32 {q4, q5}, [r6, :128]!
+ vmul.f32 q10, q15, q3
+ vmul.f32 q9, q13, q3
+ vmul.f32 q11, q12, q2
+ vmul.f32 q14, q14, q3
+ vst2.32 {q6, q7}, [r8, :128]!
+ vmul.f32 q15, q15, q2
+ vmul.f32 q12, q12, q3
+ vmul.f32 q13, q13, q2
+ vadd.f32 q10, q10, q8
+ vsub.f32 q11, q11, q9
+ vld1.32 {q8, q9}, [r3, :128]
+ vsub.f32 q14, q15, q14
+ vadd.f32 q15, q13, q12
+ vadd.f32 q13, q11, q10
+ vadd.f32 q12, q15, q14
+ vsub.f32 q15, q15, q14
+ vsub.f32 q14, q11, q10
+ vld1.32 {q10, q11}, [r5, :128]
+ vadd.f32 q0, q8, q13
+ vadd.f32 q1, q9, q12
+.if \forward
+ vadd.f32 q2, q10, q15
+ vsub.f32 q3, q11, q14
+.else
+ vsub.f32 q2, q10, q15
+ vadd.f32 q3, q11, q14
+.endif
+ vsub.f32 q4, q8, q13
+ vst2.32 {q0, q1}, [r3, :128]!
+ vsub.f32 q5, q9, q12
+.if \forward
+ vsub.f32 q6, q10, q15
+.else
+ vadd.f32 q6, q10, q15
+.endif
+ vst2.32 {q2, q3}, [r5, :128]!
+.if \forward
+ vadd.f32 q7, q11, q14
+.else
+ vsub.f32 q7, q11, q14
+.endif
+ vst2.32 {q4, q5}, [r7, :128]!
+ vst2.32 {q6, q7}, [r12, :128]!
+ bne 1b
+
+ vpop {q4-q7}
+ pop {r4-r8, pc}
+.endm
+
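+ @ Instantiate the five kernels in both directions, replacing the
+ @ hand-written per-direction copies formerly kept in neon_static_f.s and
+ @ neon_static_i.s.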
+# neon_static_e_f
+neon_static_e, forward=1
+
+# neon_static_e_i
+neon_static_e, forward=0
+
+# neon_static_o_f
+neon_static_o, forward=1
+
+# neon_static_o_i
+neon_static_o, forward=0
+
+# neon_static_x4_f
+neon_static_x4, forward=1
+
+# neon_static_x4_i
+neon_static_x4, forward=0
+
+# neon_static_x8_f
+neon_static_x8, forward=1
+
+# neon_static_x8_i
+neon_static_x8, forward=0
+
+# neon_static_x8_t_f
+neon_static_x8_t, forward=1
+
+# neon_static_x8_t_i
+neon_static_x8_t, forward=0
+
diff --git a/lib/ffts/src/neon_static_f.s b/lib/ffts/src/neon_static_f.s
deleted file mode 100644
index 920d13c..0000000
--- a/lib/ffts/src/neon_static_f.s
+++ /dev/null
@@ -1,956 +0,0 @@
-/*
-
- This file is part of FFTS -- The Fastest Fourier Transform in the South
-
- Copyright (c) 2012, Anthony M. Blake <amb@anthonix.com>
- Copyright (c) 2012, The University of Waikato
-
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions are met:
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in the
- documentation and/or other materials provided with the distribution.
- * Neither the name of the organization nor the
- names of its contributors may be used to endorse or promote products
- derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
- ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- DISCLAIMED. IN NO EVENT SHALL ANTHONY M. BLAKE BE LIABLE FOR ANY
- DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-*/
-
- .align 4
-#ifdef __APPLE__
- .globl _neon_static_e_f
-_neon_static_e_f:
-#else
- .globl neon_static_e_f
-neon_static_e_f:
-#endif
- push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
- vstmdb sp!, {d8-d15}
- ldr lr, [r0, #40] @ this is p->N
- add r3, r1, #0
- add r7, r1, lr
- add r5, r7, lr
- add r10, r5, lr
- add r4, r10, lr
- add r8, r4, lr
- add r6, r8, lr
- add r9, r6, lr
- ldr r12, [r0]
- add r1, r0, #0
- add r0, r2, #0
- ldr r2, [r1, #16] @ this is p->ee_ws
- ldr r11, [r1, #28] @ this is p->i0
-
- vld1.32 {d16, d17}, [r2, :128]
-_neon_ee_loop:
- vld2.32 {q15}, [r10, :128]!
- vld2.32 {q13}, [r8, :128]!
- vld2.32 {q14}, [r7, :128]!
- vld2.32 {q9}, [r4, :128]!
- vld2.32 {q10}, [r3, :128]!
- vld2.32 {q11}, [r6, :128]!
- vld2.32 {q12}, [r5, :128]!
- vsub.f32 q1, q14, q13
- vld2.32 {q0}, [r9, :128]!
- subs r11, r11, #1
- vsub.f32 q2, q0, q15
- vadd.f32 q0, q0, q15
- vmul.f32 d10, d2, d17
- vmul.f32 d11, d3, d16
- vmul.f32 d12, d3, d17
- vmul.f32 d6, d4, d17
- vmul.f32 d7, d5, d16
- vmul.f32 d8, d4, d16
- vmul.f32 d9, d5, d17
- vmul.f32 d13, d2, d16
- vsub.f32 d7, d7, d6
- vadd.f32 d11, d11, d10
- vsub.f32 q1, q12, q11
- vsub.f32 q2, q10, q9
- vadd.f32 d6, d9, d8
- vadd.f32 q4, q14, q13
- vadd.f32 q11, q12, q11
- vadd.f32 q12, q10, q9
- vsub.f32 d10, d13, d12
- vsub.f32 q7, q4, q0
- vsub.f32 q9, q12, q11
- vsub.f32 q13, q5, q3
- vsub.f32 d29, d5, d2 @
- vadd.f32 q5, q5, q3
- vadd.f32 q10, q4, q0
- vadd.f32 q11, q12, q11
- vadd.f32 d31, d5, d2 @
- vadd.f32 d28, d4, d3 @
- vsub.f32 d30, d4, d3 @
- vsub.f32 d5, d19, d14 @
- vsub.f32 d7, d31, d26 @
- vadd.f32 q1, q14, q5
- vadd.f32 q0, q11, q10
- vadd.f32 d6, d30, d27 @
- vadd.f32 d4, d18, d15 @
- vadd.f32 d13, d19, d14 @
- vsub.f32 d12, d18, d15 @
- vadd.f32 d15, d31, d26 @
- ldr r2, [r12], #4
- vtrn.32 q1, q3
- ldr lr, [r12], #4
- vtrn.32 q0, q2
- add r2, r0, r2, lsl #2
- vsub.f32 q4, q11, q10
- add lr, r0, lr, lsl #2
- vsub.f32 q5, q14, q5
- vsub.f32 d14, d30, d27 @
- vst2.32 {q0,q1}, [r2, :128]!
- vst2.32 {q2,q3}, [lr, :128]!
- vtrn.32 q4, q6
- vtrn.32 q5, q7
- vst2.32 {q4,q5}, [r2, :128]!
- vst2.32 {q6,q7}, [lr, :128]!
- bne _neon_ee_loop
-
- ldr r11, [r1, #12]
- vld2.32 {q9}, [r5, :128]! @tag2
- vld2.32 {q13}, [r3, :128]! @tag0
- vld2.32 {q12}, [r4, :128]! @tag1
- vld2.32 {q0}, [r7, :128]! @tag4
- vsub.f32 q11, q13, q12
- vld2.32 {q8}, [r6, :128]! @tag3
- vadd.f32 q12, q13, q12
- vsub.f32 q10, q9, q8
- vadd.f32 q8, q9, q8
- vadd.f32 q9, q12, q8
- vsub.f32 d9, d23, d20 @
- vadd.f32 d11, d23, d20 @
- vsub.f32 q8, q12, q8
- vadd.f32 d8, d22, d21 @
- vsub.f32 d10, d22, d21 @
- ldr r2, [r12], #4
- vld1.32 {d20, d21}, [r11, :128]
- ldr lr, [r12], #4
- vtrn.32 q9, q4
- add r2, r0, r2, lsl #2
- vtrn.32 q8, q5
- add lr, r0, lr, lsl #2
- vswp d9,d10
- vst1.32 {d8,d9,d10,d11}, [lr, :128]!
- vld2.32 {q13}, [r10, :128]! @tag7
- vld2.32 {q15}, [r9, :128]! @tag6
- vld2.32 {q11}, [r8, :128]! @tag5
- vsub.f32 q14, q15, q13
- vsub.f32 q12, q0, q11
- vadd.f32 q11, q0, q11
- vadd.f32 q13, q15, q13
- vsub.f32 d13, d29, d24 @
- vadd.f32 q15, q13, q11
- vadd.f32 d12, d28, d25 @
- vadd.f32 d15, d29, d24 @
- vsub.f32 d14, d28, d25 @
- vtrn.32 q15, q6
- vsub.f32 q15, q13, q11
- vtrn.32 q15, q7
- vswp d13, d14
- vst1.32 {d12,d13,d14,d15}, [lr, :128]!
- vtrn.32 q13, q14
- vtrn.32 q11, q12
- vmul.f32 d24, d26, d21
- vmul.f32 d28, d27, d20
- vmul.f32 d25, d26, d20
- vmul.f32 d26, d27, d21
- vmul.f32 d27, d22, d21
- vmul.f32 d30, d23, d20
- vmul.f32 d29, d23, d21
- vmul.f32 d22, d22, d20
- vsub.f32 d21, d28, d24
- vadd.f32 d20, d26, d25
- vadd.f32 d25, d30, d27
- vsub.f32 d24, d22, d29
- vadd.f32 q11, q12, q10
- vsub.f32 q10, q12, q10
- vadd.f32 q0, q9, q11
- vsub.f32 q2, q9, q11
- vsub.f32 d3, d17, d20 @
- vadd.f32 d7, d17, d20 @
- vadd.f32 d2, d16, d21 @
- vsub.f32 d6, d16, d21 @
- vswp d1, d2
- vswp d5, d6
- vstmia r2!, {q0-q3}
-
- add r2, r7, #0
- add r7, r9, #0
- add r9, r2, #0
- add r2, r8, #0
- add r8, r10, #0
- add r10, r2, #0
- ldr r11, [r1, #32] @ this is p->i1
- cmp r11, #0
- beq _neon_oo_loop_exit
-_neon_oo_loop:
- vld2.32 {q8}, [r6, :128]!
- vld2.32 {q9}, [r5, :128]!
- vld2.32 {q10}, [r4, :128]!
- vld2.32 {q13}, [r3, :128]!
- vadd.f32 q11, q9, q8
- vsub.f32 q8, q9, q8
- vsub.f32 q9, q13, q10
- vadd.f32 q12, q13, q10
- subs r11, r11, #1
- vld2.32 {q10}, [r7, :128]!
- vld2.32 {q13}, [r9, :128]!
- vsub.f32 q2, q12, q11
- vadd.f32 d7, d19, d16 @
- vsub.f32 d3, d19, d16 @
- vsub.f32 d6, d18, d17 @
- vadd.f32 d2, d18, d17 @
- vld2.32 {q9}, [r8, :128]!
- vld2.32 {q8}, [r10, :128]!
- vadd.f32 q0, q12, q11
- vadd.f32 q11, q13, q8
- vadd.f32 q12, q10, q9
- vsub.f32 q8, q13, q8
- vsub.f32 q9, q10, q9
- vsub.f32 q6, q12, q11
- vadd.f32 q4, q12, q11
- vtrn.32 q0, q2
- ldr r2, [r12], #4
- vadd.f32 d15, d19, d16 @
- ldr lr, [r12], #4
- vsub.f32 d11, d19, d16 @
- vsub.f32 d14, d18, d17 @
- vadd.f32 d10, d18, d17 @
- add r2, r0, r2, lsl #2
- vtrn.32 q1, q3
- add lr, r0, lr, lsl #2
- vst2.32 {q0,q1}, [r2, :128]!
- vst2.32 {q2,q3}, [lr, :128]!
- vtrn.32 q4, q6
- vtrn.32 q5, q7
- vst2.32 {q4,q5}, [r2, :128]!
- vst2.32 {q6,q7}, [lr, :128]!
- bne _neon_oo_loop
-_neon_oo_loop_exit:
-
-
- add r2, r3, #0
- add r3, r7, #0
- add r7, r2, #0
- add r2, r4, #0
- add r4, r8, #0
- add r8, r2, #0
- add r2, r5, #0
- add r5, r9, #0
- add r9, r2, #0
- add r2, r6, #0
- add r6, r10, #0
- add r10, r2, #0
- add r2, r9, #0
- add r9, r10, #0
- add r10, r2, #0
- ldr r2, [r1, #16]
- ldr r11, [r1, #32] @ this is p->i1
- cmp r11, #0
- beq _neon_ee_loop2_exit
-
- vld1.32 {d16, d17}, [r2, :128]
-_neon_ee_loop2:
- vld2.32 {q15}, [r10, :128]!
- vld2.32 {q13}, [r8, :128]!
- vld2.32 {q14}, [r7, :128]!
- vld2.32 {q9}, [r4, :128]!
- vld2.32 {q10}, [r3, :128]!
- vld2.32 {q11}, [r6, :128]!
- vld2.32 {q12}, [r5, :128]!
- vsub.f32 q1, q14, q13
- vld2.32 {q0}, [r9, :128]!
- subs r11, r11, #1
- vsub.f32 q2, q0, q15
- vadd.f32 q0, q0, q15
- vmul.f32 d10, d2, d17
- vmul.f32 d11, d3, d16
- vmul.f32 d12, d3, d17
- vmul.f32 d6, d4, d17
- vmul.f32 d7, d5, d16
- vmul.f32 d8, d4, d16
- vmul.f32 d9, d5, d17
- vmul.f32 d13, d2, d16
- vsub.f32 d7, d7, d6
- vadd.f32 d11, d11, d10
- vsub.f32 q1, q12, q11
- vsub.f32 q2, q10, q9
- vadd.f32 d6, d9, d8
- vadd.f32 q4, q14, q13
- vadd.f32 q11, q12, q11
- vadd.f32 q12, q10, q9
- vsub.f32 d10, d13, d12
- vsub.f32 q7, q4, q0
- vsub.f32 q9, q12, q11
- vsub.f32 q13, q5, q3
- vsub.f32 d29, d5, d2 @
- vadd.f32 q5, q5, q3
- vadd.f32 q10, q4, q0
- vadd.f32 q11, q12, q11
- vadd.f32 d31, d5, d2 @
- vadd.f32 d28, d4, d3 @
- vsub.f32 d30, d4, d3 @
- vsub.f32 d5, d19, d14 @
- vsub.f32 d7, d31, d26 @
- vadd.f32 q1, q14, q5
- vadd.f32 q0, q11, q10
- vadd.f32 d6, d30, d27 @
- vadd.f32 d4, d18, d15 @
- vadd.f32 d13, d19, d14 @
- vsub.f32 d12, d18, d15 @
- vadd.f32 d15, d31, d26 @
- ldr r2, [r12], #4
- vtrn.32 q1, q3
- ldr lr, [r12], #4
- vtrn.32 q0, q2
- add r2, r0, r2, lsl #2
- vsub.f32 q4, q11, q10
- add lr, r0, lr, lsl #2
- vsub.f32 q5, q14, q5
- vsub.f32 d14, d30, d27 @
- vst2.32 {q0,q1}, [r2, :128]!
- vst2.32 {q2,q3}, [lr, :128]!
- vtrn.32 q4, q6
- vtrn.32 q5, q7
- vst2.32 {q4,q5}, [r2, :128]!
- vst2.32 {q6,q7}, [lr, :128]!
- bne _neon_ee_loop2
-_neon_ee_loop2_exit:
-
- vldmia sp!, {d8-d15}
- pop {r4, r5, r6, r7, r8, r9, r10, r11, pc}
-
-
-
-
- .align 4
-#ifdef __APPLE__
- .globl _neon_static_o_f
-_neon_static_o_f:
-#else
- .globl neon_static_o_f
-neon_static_o_f:
-#endif
- push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
- vstmdb sp!, {d8-d15}
- ldr lr, [r0, #40] @ this is p->N
- add r3, r1, #0
- add r7, r1, lr
- add r5, r7, lr
- add r10, r5, lr
- add r4, r10, lr
- add r8, r4, lr
- add r6, r8, lr
- add r9, r6, lr
- ldr r12, [r0]
- add r1, r0, #0
- add r0, r2, #0
- ldr r2, [r1, #16] @ this is p->ee_ws
- ldr r11, [r1, #28] @ this is p->i0
-
- vld1.32 {d16, d17}, [r2, :128]
-_neon_ee_o_loop:
- vld2.32 {q15}, [r10, :128]!
- vld2.32 {q13}, [r8, :128]!
- vld2.32 {q14}, [r7, :128]!
- vld2.32 {q9}, [r4, :128]!
- vld2.32 {q10}, [r3, :128]!
- vld2.32 {q11}, [r6, :128]!
- vld2.32 {q12}, [r5, :128]!
- vsub.f32 q1, q14, q13
- vld2.32 {q0}, [r9, :128]!
- subs r11, r11, #1
- vsub.f32 q2, q0, q15
- vadd.f32 q0, q0, q15
- vmul.f32 d10, d2, d17
- vmul.f32 d11, d3, d16
- vmul.f32 d12, d3, d17
- vmul.f32 d6, d4, d17
- vmul.f32 d7, d5, d16
- vmul.f32 d8, d4, d16
- vmul.f32 d9, d5, d17
- vmul.f32 d13, d2, d16
- vsub.f32 d7, d7, d6
- vadd.f32 d11, d11, d10
- vsub.f32 q1, q12, q11
- vsub.f32 q2, q10, q9
- vadd.f32 d6, d9, d8
- vadd.f32 q4, q14, q13
- vadd.f32 q11, q12, q11
- vadd.f32 q12, q10, q9
- vsub.f32 d10, d13, d12
- vsub.f32 q7, q4, q0
- vsub.f32 q9, q12, q11
- vsub.f32 q13, q5, q3
- vsub.f32 d29, d5, d2 @
- vadd.f32 q5, q5, q3
- vadd.f32 q10, q4, q0
- vadd.f32 q11, q12, q11
- vadd.f32 d31, d5, d2 @
- vadd.f32 d28, d4, d3 @
- vsub.f32 d30, d4, d3 @
- vsub.f32 d5, d19, d14 @
- vsub.f32 d7, d31, d26 @
- vadd.f32 q1, q14, q5
- vadd.f32 q0, q11, q10
- vadd.f32 d6, d30, d27 @
- vadd.f32 d4, d18, d15 @
- vadd.f32 d13, d19, d14 @
- vsub.f32 d12, d18, d15 @
- vadd.f32 d15, d31, d26 @
- ldr r2, [r12], #4
- vtrn.32 q1, q3
- ldr lr, [r12], #4
- vtrn.32 q0, q2
- add r2, r0, r2, lsl #2
- vsub.f32 q4, q11, q10
- add lr, r0, lr, lsl #2
- vsub.f32 q5, q14, q5
- vsub.f32 d14, d30, d27 @
- vst2.32 {q0,q1}, [r2, :128]!
- vst2.32 {q2,q3}, [lr, :128]!
- vtrn.32 q4, q6
- vtrn.32 q5, q7
- vst2.32 {q4,q5}, [r2, :128]!
- vst2.32 {q6,q7}, [lr, :128]!
- bne _neon_ee_o_loop
-
- add r2, r7, #0
- add r7, r9, #0
- add r9, r2, #0
- add r2, r8, #0
- add r8, r10, #0
- add r10, r2, #0
- ldr r11, [r1, #32] @ this is p->i1
- cmp r11, #0
- beq _neon_oo_o_loop_exit
-_neon_oo_o_loop:
- vld2.32 {q8}, [r6, :128]!
- vld2.32 {q9}, [r5, :128]!
- vld2.32 {q10}, [r4, :128]!
- vld2.32 {q13}, [r3, :128]!
- vadd.f32 q11, q9, q8
- vsub.f32 q8, q9, q8
- vsub.f32 q9, q13, q10
- vadd.f32 q12, q13, q10
- subs r11, r11, #1
- vld2.32 {q10}, [r7, :128]!
- vld2.32 {q13}, [r9, :128]!
- vsub.f32 q2, q12, q11
- vadd.f32 d7, d19, d16 @
- vsub.f32 d3, d19, d16 @
- vsub.f32 d6, d18, d17 @
- vadd.f32 d2, d18, d17 @
- vld2.32 {q9}, [r8, :128]!
- vld2.32 {q8}, [r10, :128]!
- vadd.f32 q0, q12, q11
- vadd.f32 q11, q13, q8
- vadd.f32 q12, q10, q9
- vsub.f32 q8, q13, q8
- vsub.f32 q9, q10, q9
- vsub.f32 q6, q12, q11
- vadd.f32 q4, q12, q11
- vtrn.32 q0, q2
- ldr r2, [r12], #4
- vadd.f32 d15, d19, d16 @
- ldr lr, [r12], #4
- vsub.f32 d11, d19, d16 @
- vsub.f32 d14, d18, d17 @
- vadd.f32 d10, d18, d17 @
- add r2, r0, r2, lsl #2
- vtrn.32 q1, q3
- add lr, r0, lr, lsl #2
- vst2.32 {q0,q1}, [r2, :128]!
- vst2.32 {q2,q3}, [lr, :128]!
- vtrn.32 q4, q6
- vtrn.32 q5, q7
- vst2.32 {q4,q5}, [r2, :128]!
- vst2.32 {q6,q7}, [lr, :128]!
- bne _neon_oo_o_loop
-_neon_oo_o_loop_exit:
-
- ldr r11, [r1, #8]
- vld1.32 {q8}, [r5, :128]!
- vld1.32 {q10}, [r6, :128]!
- vld2.32 {q11}, [r4, :128]!
- vld2.32 {q13}, [r3, :128]!
- vld2.32 {q15}, [r10, :128]!
- vorr d25, d17, d17
- vorr d24, d20, d20
- vorr d20, d16, d16
- vsub.f32 q9, q13, q11
- vadd.f32 q11, q13, q11
- ldr r2, [r12], #4
- vtrn.32 d24, d25
- ldr lr, [r12], #4
- vtrn.32 d20, d21
- add r2, r0, r2, lsl #2
- vsub.f32 q8, q10, q12
- add lr, r0, lr, lsl #2
- vadd.f32 q10, q10, q12
- vadd.f32 q0, q11, q10
- vsub.f32 d25, d19, d16 @
- vadd.f32 d27, d19, d16 @
- vsub.f32 q1, q11, q10
- vadd.f32 d24, d18, d17 @
- vsub.f32 d26, d18, d17 @
- vtrn.32 q0, q12
- vtrn.32 q1, q13
- vld1.32 {d24, d25}, [r11, :128]
- vswp d1, d2
- vst1.32 {q0, q1}, [r2, :128]!
- vld2.32 {q0}, [r9, :128]!
- vadd.f32 q1, q0, q15
- vld2.32 {q13}, [r8, :128]!
- vld2.32 {q14}, [r7, :128]!
- vsub.f32 q15, q0, q15
- vsub.f32 q0, q14, q13
- vadd.f32 q3, q14, q13
- vadd.f32 q2, q3, q1
- vsub.f32 d29, d1, d30 @
- vadd.f32 d27, d1, d30 @
- vsub.f32 q3, q3, q1
- vadd.f32 d28, d0, d31 @
- vsub.f32 d26, d0, d31 @
- vtrn.32 q2, q14
- vtrn.32 q3, q13
- vswp d5, d6
- vst1.32 {q2, q3}, [r2, :128]!
- vtrn.32 q11, q9
- vtrn.32 q10, q8
- vmul.f32 d20, d18, d25
- vmul.f32 d22, d19, d24
- vmul.f32 d21, d19, d25
- vmul.f32 d18, d18, d24
- vmul.f32 d19, d16, d25
- vmul.f32 d30, d17, d24
- vmul.f32 d23, d16, d24
- vmul.f32 d24, d17, d25
- vadd.f32 d17, d22, d20
- vsub.f32 d16, d18, d21
- vsub.f32 d21, d30, d19
- vadd.f32 d20, d24, d23
- vadd.f32 q9, q8, q10
- vsub.f32 q8, q8, q10
- vadd.f32 q4, q14, q9
- vsub.f32 q6, q14, q9
- vsub.f32 d11, d27, d16 @
- vadd.f32 d15, d27, d16 @
- vadd.f32 d10, d26, d17 @
- vsub.f32 d14, d26, d17 @
- vswp d9, d10
- vswp d13, d14
- vstmia lr!, {q4-q7}
-
-
- add r2, r3, #0
- add r3, r7, #0
- add r7, r2, #0
- add r2, r4, #0
- add r4, r8, #0
- add r8, r2, #0
- add r2, r5, #0
- add r5, r9, #0
- add r9, r2, #0
- add r2, r6, #0
- add r6, r10, #0
- add r10, r2, #0
- add r2, r9, #0
- add r9, r10, #0
- add r10, r2, #0
- ldr r2, [r1, #16]
- ldr r11, [r1, #32] @ this is p->i1
- cmp r11, #0
- beq _neon_ee_o_loop2_exit
-
- vld1.32 {d16, d17}, [r2, :128]
-_neon_ee_o_loop2:
- vld2.32 {q15}, [r10, :128]!
- vld2.32 {q13}, [r8, :128]!
- vld2.32 {q14}, [r7, :128]!
- vld2.32 {q9}, [r4, :128]!
- vld2.32 {q10}, [r3, :128]!
- vld2.32 {q11}, [r6, :128]!
- vld2.32 {q12}, [r5, :128]!
- vsub.f32 q1, q14, q13
- vld2.32 {q0}, [r9, :128]!
- subs r11, r11, #1
- vsub.f32 q2, q0, q15
- vadd.f32 q0, q0, q15
- vmul.f32 d10, d2, d17
- vmul.f32 d11, d3, d16
- vmul.f32 d12, d3, d17
- vmul.f32 d6, d4, d17
- vmul.f32 d7, d5, d16
- vmul.f32 d8, d4, d16
- vmul.f32 d9, d5, d17
- vmul.f32 d13, d2, d16
- vsub.f32 d7, d7, d6
- vadd.f32 d11, d11, d10
- vsub.f32 q1, q12, q11
- vsub.f32 q2, q10, q9
- vadd.f32 d6, d9, d8
- vadd.f32 q4, q14, q13
- vadd.f32 q11, q12, q11
- vadd.f32 q12, q10, q9
- vsub.f32 d10, d13, d12
- vsub.f32 q7, q4, q0
- vsub.f32 q9, q12, q11
- vsub.f32 q13, q5, q3
- vsub.f32 d29, d5, d2 @
- vadd.f32 q5, q5, q3
- vadd.f32 q10, q4, q0
- vadd.f32 q11, q12, q11
- vadd.f32 d31, d5, d2 @
- vadd.f32 d28, d4, d3 @
- vsub.f32 d30, d4, d3 @
- vsub.f32 d5, d19, d14 @
- vsub.f32 d7, d31, d26 @
- vadd.f32 q1, q14, q5
- vadd.f32 q0, q11, q10
- vadd.f32 d6, d30, d27 @
- vadd.f32 d4, d18, d15 @
- vadd.f32 d13, d19, d14 @
- vsub.f32 d12, d18, d15 @
- vadd.f32 d15, d31, d26 @
- ldr r2, [r12], #4
- vtrn.32 q1, q3
- ldr lr, [r12], #4
- vtrn.32 q0, q2
- add r2, r0, r2, lsl #2
- vsub.f32 q4, q11, q10
- add lr, r0, lr, lsl #2
- vsub.f32 q5, q14, q5
- vsub.f32 d14, d30, d27 @
- vst2.32 {q0,q1}, [r2, :128]!
- vst2.32 {q2,q3}, [lr, :128]!
- vtrn.32 q4, q6
- vtrn.32 q5, q7
- vst2.32 {q4,q5}, [r2, :128]!
- vst2.32 {q6,q7}, [lr, :128]!
- bne _neon_ee_o_loop2
-_neon_ee_o_loop2_exit:
-
- vldmia sp!, {d8-d15}
- pop {r4, r5, r6, r7, r8, r9, r10, r11, pc}
-
- .align 4
-#ifdef __APPLE__
- .globl _neon_static_x4_f
-_neon_static_x4_f:
-#else
- .globl neon_static_x4_f
-neon_static_x4_f:
-#endif
-@ add r3, r0, #0
- push {r4, r5, r6, lr}
- vstmdb sp!, {d8-d15}
-
- vld1.32 {q8,q9}, [r0, :128]
- add r4, r0, r1, lsl #1
- vld1.32 {q10,q11}, [r4, :128]
- add r5, r0, r1, lsl #2
- vld1.32 {q12,q13}, [r5, :128]
- add r6, r4, r1, lsl #2
- vld1.32 {q14,q15}, [r6, :128]
- vld1.32 {q2,q3}, [r2, :128]
-
- vmul.f32 q0, q13, q3
- vmul.f32 q5, q12, q2
- vmul.f32 q1, q14, q2
- vmul.f32 q4, q14, q3
- vmul.f32 q14, q12, q3
- vmul.f32 q13, q13, q2
- vmul.f32 q12, q15, q3
- vmul.f32 q2, q15, q2
- vsub.f32 q0, q5, q0
- vadd.f32 q13, q13, q14
- vadd.f32 q12, q12, q1
- vsub.f32 q1, q2, q4
- vadd.f32 q15, q0, q12
- vsub.f32 q12, q0, q12
- vadd.f32 q14, q13, q1
- vsub.f32 q13, q13, q1
- vadd.f32 q0, q8, q15
- vadd.f32 q1, q9, q14
- vadd.f32 q2, q10, q13 @
- vsub.f32 q4, q8, q15
- vsub.f32 q3, q11, q12 @
- vst1.32 {q0,q1}, [r0, :128]
- vsub.f32 q5, q9, q14
- vsub.f32 q6, q10, q13 @
- vadd.f32 q7, q11, q12 @
- vst1.32 {q2,q3}, [r4, :128]
- vst1.32 {q4,q5}, [r5, :128]
- vst1.32 {q6,q7}, [r6, :128]
- vldmia sp!, {d8-d15}
- pop {r4, r5, r6, pc}
-
-
-
- .align 4
-#ifdef __APPLE__
- .globl _neon_static_x8_f
-_neon_static_x8_f:
-#else
- .globl neon_static_x8_f
-neon_static_x8_f:
-#endif
- push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
- vstmdb sp!, {d8-d15}
- mov r11, #0
- add r3, r0, #0 @ data0
- add r5, r0, r1, lsl #1 @ data2
- add r4, r0, r1 @ data1
- add r7, r5, r1, lsl #1 @ data4
- add r6, r5, r1 @ data3
- add r9, r7, r1, lsl #1 @ data6
- add r8, r7, r1 @ data5
- add r10, r9, r1 @ data7
- add r12, r2, #0 @ LUT
-
- sub r11, r11, r1, lsr #5
-neon_x8_loop:
- vld1.32 {q2,q3}, [r12, :128]!
- vld1.32 {q14,q15}, [r6, :128]
- vld1.32 {q10,q11}, [r5, :128]
- adds r11, r11, #1
- vmul.f32 q12, q15, q2
- vmul.f32 q8, q14, q3
- vmul.f32 q13, q14, q2
- vmul.f32 q9, q10, q3
- vmul.f32 q1, q10, q2
- vmul.f32 q0, q11, q2
- vmul.f32 q14, q11, q3
- vmul.f32 q15, q15, q3
- vld1.32 {q2,q3}, [r12, :128]!
- vsub.f32 q10, q12, q8
- vadd.f32 q11, q0, q9
- vadd.f32 q8, q15, q13
- vld1.32 {q12,q13}, [r4, :128]
- vsub.f32 q9, q1, q14
- vsub.f32 q15, q11, q10
- vsub.f32 q14, q9, q8
- vadd.f32 q4, q12, q15 @
- vsub.f32 q6, q12, q15 @
- vsub.f32 q5, q13, q14 @
- vadd.f32 q7, q13, q14 @
- vld1.32 {q14,q15}, [r9, :128]
- vld1.32 {q12,q13}, [r7, :128]
- vmul.f32 q1, q14, q2
- vmul.f32 q0, q14, q3
- vst1.32 {q4,q5}, [r4, :128]
- vmul.f32 q14, q15, q3
- vmul.f32 q4, q15, q2
- vadd.f32 q15, q9, q8
- vst1.32 {q6,q7}, [r6, :128]
- vmul.f32 q8, q12, q3
- vmul.f32 q5, q13, q3
- vmul.f32 q12, q12, q2
- vmul.f32 q9, q13, q2
- vadd.f32 q14, q14, q1
- vsub.f32 q13, q4, q0
- vadd.f32 q0, q9, q8
- vld1.32 {q8,q9}, [r3, :128]
- vadd.f32 q1, q11, q10
- vsub.f32 q12, q12, q5
- vadd.f32 q11, q8, q15
- vsub.f32 q8, q8, q15
- vadd.f32 q2, q12, q14
- vsub.f32 q10, q0, q13
- vadd.f32 q15, q0, q13
- vadd.f32 q13, q9, q1
- vsub.f32 q9, q9, q1
- vsub.f32 q12, q12, q14
- vadd.f32 q0, q11, q2
- vadd.f32 q1, q13, q15
- vsub.f32 q4, q11, q2
- vadd.f32 q2, q8, q10 @
- vsub.f32 q3, q9, q12 @
- vst1.32 {q0,q1}, [r3, :128]!
- vsub.f32 q5, q13, q15
- vld1.32 {q14,q15}, [r10, :128]
- vadd.f32 q7, q9, q12 @
- vld1.32 {q12,q13}, [r8, :128]
- vst1.32 {q2,q3}, [r5, :128]!
- vld1.32 {q2,q3}, [r12, :128]!
- vsub.f32 q6, q8, q10 @
- vmul.f32 q8, q14, q2
- vst1.32 {q4,q5}, [r7, :128]!
- vmul.f32 q10, q15, q3
- vmul.f32 q9, q13, q3
- vmul.f32 q11, q12, q2
- vmul.f32 q14, q14, q3
- vst1.32 {q6,q7}, [r9, :128]!
- vmul.f32 q15, q15, q2
- vmul.f32 q12, q12, q3
- vmul.f32 q13, q13, q2
- vadd.f32 q10, q10, q8
- vsub.f32 q11, q11, q9
- vld1.32 {q8,q9}, [r4, :128]
- vsub.f32 q14, q15, q14
- vadd.f32 q15, q13, q12
- vadd.f32 q13, q11, q10
- vadd.f32 q12, q15, q14
- vsub.f32 q15, q15, q14
- vsub.f32 q14, q11, q10
- vld1.32 {q10,q11}, [r6, :128]
- vadd.f32 q0, q8, q13
- vadd.f32 q1, q9, q12
- vadd.f32 q2, q10, q15 @
- vsub.f32 q3, q11, q14 @
- vsub.f32 q4, q8, q13
- vst1.32 {q0,q1}, [r4, :128]!
- vsub.f32 q5, q9, q12
- vsub.f32 q6, q10, q15 @
- vst1.32 {q2,q3}, [r6, :128]!
- vadd.f32 q7, q11, q14 @
- vst1.32 {q4,q5}, [r8, :128]!
- vst1.32 {q6,q7}, [r10, :128]!
- bne neon_x8_loop
-
- vldmia sp!, {d8-d15}
- pop {r4, r5, r6, r7, r8, r9, r10, r11, pc}
-
- .align 4
-#ifdef __APPLE__
- .globl _neon_static_x8_t_f
-_neon_static_x8_t_f:
-#else
- .globl neon_static_x8_t_f
-neon_static_x8_t_f:
-#endif
- push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
- vstmdb sp!, {d8-d15}
- mov r11, #0
- add r3, r0, #0 @ data0
- add r5, r0, r1, lsl #1 @ data2
- add r4, r0, r1 @ data1
- add r7, r5, r1, lsl #1 @ data4
- add r6, r5, r1 @ data3
- add r9, r7, r1, lsl #1 @ data6
- add r8, r7, r1 @ data5
- add r10, r9, r1 @ data7
- add r12, r2, #0 @ LUT
-
- sub r11, r11, r1, lsr #5
-neon_x8_t_loop:
- vld1.32 {q2,q3}, [r12, :128]!
- vld1.32 {q14,q15}, [r6, :128]
- vld1.32 {q10,q11}, [r5, :128]
- adds r11, r11, #1
- vmul.f32 q12, q15, q2
- vmul.f32 q8, q14, q3
- vmul.f32 q13, q14, q2
- vmul.f32 q9, q10, q3
- vmul.f32 q1, q10, q2
- vmul.f32 q0, q11, q2
- vmul.f32 q14, q11, q3
- vmul.f32 q15, q15, q3
- vld1.32 {q2,q3}, [r12, :128]!
- vsub.f32 q10, q12, q8
- vadd.f32 q11, q0, q9
- vadd.f32 q8, q15, q13
- vld1.32 {q12,q13}, [r4, :128]
- vsub.f32 q9, q1, q14
- vsub.f32 q15, q11, q10
- vsub.f32 q14, q9, q8
- vadd.f32 q4, q12, q15 @
- vsub.f32 q6, q12, q15 @
- vsub.f32 q5, q13, q14 @
- vadd.f32 q7, q13, q14 @
- vld1.32 {q14,q15}, [r9, :128]
- vld1.32 {q12,q13}, [r7, :128]
- vmul.f32 q1, q14, q2
- vmul.f32 q0, q14, q3
- vst1.32 {q4,q5}, [r4, :128]
- vmul.f32 q14, q15, q3
- vmul.f32 q4, q15, q2
- vadd.f32 q15, q9, q8
- vst1.32 {q6,q7}, [r6, :128]
- vmul.f32 q8, q12, q3
- vmul.f32 q5, q13, q3
- vmul.f32 q12, q12, q2
- vmul.f32 q9, q13, q2
- vadd.f32 q14, q14, q1
- vsub.f32 q13, q4, q0
- vadd.f32 q0, q9, q8
- vld1.32 {q8,q9}, [r3, :128]
- vadd.f32 q1, q11, q10
- vsub.f32 q12, q12, q5
- vadd.f32 q11, q8, q15
- vsub.f32 q8, q8, q15
- vadd.f32 q2, q12, q14
- vsub.f32 q10, q0, q13
- vadd.f32 q15, q0, q13
- vadd.f32 q13, q9, q1
- vsub.f32 q9, q9, q1
- vsub.f32 q12, q12, q14
- vadd.f32 q0, q11, q2
- vadd.f32 q1, q13, q15
- vsub.f32 q4, q11, q2
- vadd.f32 q2, q8, q10 @
- vsub.f32 q3, q9, q12 @
- vst2.32 {q0,q1}, [r3, :128]!
- vsub.f32 q5, q13, q15
- vld1.32 {q14,q15}, [r10, :128]
- vadd.f32 q7, q9, q12 @
- vld1.32 {q12,q13}, [r8, :128]
- vst2.32 {q2,q3}, [r5, :128]!
- vld1.32 {q2,q3}, [r12, :128]!
- vsub.f32 q6, q8, q10 @
- vmul.f32 q8, q14, q2
- vst2.32 {q4,q5}, [r7, :128]!
- vmul.f32 q10, q15, q3
- vmul.f32 q9, q13, q3
- vmul.f32 q11, q12, q2
- vmul.f32 q14, q14, q3
- vst2.32 {q6,q7}, [r9, :128]!
- vmul.f32 q15, q15, q2
- vmul.f32 q12, q12, q3
- vmul.f32 q13, q13, q2
- vadd.f32 q10, q10, q8
- vsub.f32 q11, q11, q9
- vld1.32 {q8,q9}, [r4, :128]
- vsub.f32 q14, q15, q14
- vadd.f32 q15, q13, q12
- vadd.f32 q13, q11, q10
- vadd.f32 q12, q15, q14
- vsub.f32 q15, q15, q14
- vsub.f32 q14, q11, q10
- vld1.32 {q10,q11}, [r6, :128]
- vadd.f32 q0, q8, q13
- vadd.f32 q1, q9, q12
- vadd.f32 q2, q10, q15 @
- vsub.f32 q3, q11, q14 @
- vsub.f32 q4, q8, q13
- vst2.32 {q0,q1}, [r4, :128]!
- vsub.f32 q5, q9, q12
- vsub.f32 q6, q10, q15 @
- vst2.32 {q2,q3}, [r6, :128]!
- vadd.f32 q7, q11, q14 @
- vst2.32 {q4,q5}, [r8, :128]!
- vst2.32 {q6,q7}, [r10, :128]!
- bne neon_x8_t_loop
-
- vldmia sp!, {d8-d15}
- pop {r4, r5, r6, r7, r8, r9, r10, r11, pc}
-
-
diff --git a/lib/ffts/src/neon_static_i.s b/lib/ffts/src/neon_static_i.s
deleted file mode 100644
index cfa766c..0000000
--- a/lib/ffts/src/neon_static_i.s
+++ /dev/null
@@ -1,955 +0,0 @@
-/*
-
- This file is part of FFTS -- The Fastest Fourier Transform in the South
-
- Copyright (c) 2012, Anthony M. Blake <amb@anthonix.com>
- Copyright (c) 2012, The University of Waikato
-
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions are met:
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in the
- documentation and/or other materials provided with the distribution.
- * Neither the name of the organization nor the
- names of its contributors may be used to endorse or promote products
- derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
- ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- DISCLAIMED. IN NO EVENT SHALL ANTHONY M. BLAKE BE LIABLE FOR ANY
- DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-*/
-
- .align 4
-#ifdef __APPLE__
- .globl _neon_static_e_i
-_neon_static_e_i:
-#else
- .globl neon_static_e_i
-neon_static_e_i:
-#endif
- push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
- vstmdb sp!, {d8-d15}
- ldr lr, [r0, #40] @ this is p->N
- add r3, r1, #0
- add r7, r1, lr
- add r5, r7, lr
- add r10, r5, lr
- add r4, r10, lr
- add r8, r4, lr
- add r6, r8, lr
- add r9, r6, lr
- ldr r12, [r0]
- add r1, r0, #0
- add r0, r2, #0
- ldr r2, [r1, #16] @ this is p->ee_ws
- ldr r11, [r1, #28] @ this is p->i0
-
- vld1.32 {d16, d17}, [r2, :128]
-_neon_ee_loop:
- vld2.32 {q15}, [r10, :128]!
- vld2.32 {q13}, [r8, :128]!
- vld2.32 {q14}, [r7, :128]!
- vld2.32 {q9}, [r4, :128]!
- vld2.32 {q10}, [r3, :128]!
- vld2.32 {q11}, [r6, :128]!
- vld2.32 {q12}, [r5, :128]!
- vsub.f32 q1, q14, q13
- vld2.32 {q0}, [r9, :128]!
- subs r11, r11, #1
- vsub.f32 q2, q0, q15
- vadd.f32 q0, q0, q15
- vmul.f32 d10, d2, d17
- vmul.f32 d11, d3, d16
- vmul.f32 d12, d3, d17
- vmul.f32 d6, d4, d17
- vmul.f32 d7, d5, d16
- vmul.f32 d8, d4, d16
- vmul.f32 d9, d5, d17
- vmul.f32 d13, d2, d16
- vsub.f32 d7, d7, d6
- vadd.f32 d11, d11, d10
- vsub.f32 q1, q12, q11
- vsub.f32 q2, q10, q9
- vadd.f32 d6, d9, d8
- vadd.f32 q4, q14, q13
- vadd.f32 q11, q12, q11
- vadd.f32 q12, q10, q9
- vsub.f32 d10, d13, d12
- vsub.f32 q7, q4, q0
- vsub.f32 q9, q12, q11
- vsub.f32 q13, q5, q3
- vadd.f32 d29, d5, d2 @
- vadd.f32 q5, q5, q3
- vadd.f32 q10, q4, q0
- vadd.f32 q11, q12, q11
- vsub.f32 d31, d5, d2 @
- vsub.f32 d28, d4, d3 @
- vadd.f32 d30, d4, d3 @
- vadd.f32 d5, d19, d14 @
- vadd.f32 d7, d31, d26 @
- vadd.f32 q1, q14, q5
- vadd.f32 q0, q11, q10
- vsub.f32 d6, d30, d27 @
- vsub.f32 d4, d18, d15 @
- vsub.f32 d13, d19, d14 @
- vadd.f32 d12, d18, d15 @
- vsub.f32 d15, d31, d26 @
- ldr r2, [r12], #4
- vtrn.32 q1, q3
- ldr lr, [r12], #4
- vtrn.32 q0, q2
- add r2, r0, r2, lsl #2
- vsub.f32 q4, q11, q10
- add lr, r0, lr, lsl #2
- vsub.f32 q5, q14, q5
- vadd.f32 d14, d30, d27 @
- vst2.32 {q0,q1}, [r2, :128]!
- vst2.32 {q2,q3}, [lr, :128]!
- vtrn.32 q4, q6
- vtrn.32 q5, q7
- vst2.32 {q4,q5}, [r2, :128]!
- vst2.32 {q6,q7}, [lr, :128]!
- bne _neon_ee_loop
-
- ldr r11, [r1, #12]
- vld2.32 {q9}, [r5, :128]! @tag2
- vld2.32 {q13}, [r3, :128]! @tag0
- vld2.32 {q12}, [r4, :128]! @tag1
- vld2.32 {q0}, [r7, :128]! @tag4
- vsub.f32 q11, q13, q12
- vld2.32 {q8}, [r6, :128]! @tag3
- vadd.f32 q12, q13, q12
- vsub.f32 q10, q9, q8
- vadd.f32 q8, q9, q8
- vadd.f32 q9, q12, q8
- vadd.f32 d9, d23, d20 @
- vsub.f32 d11, d23, d20 @
- vsub.f32 q8, q12, q8
- vsub.f32 d8, d22, d21 @
- vadd.f32 d10, d22, d21 @
- ldr r2, [r12], #4
- vld1.32 {d20, d21}, [r11, :128]
- ldr lr, [r12], #4
- vtrn.32 q9, q4
- add r2, r0, r2, lsl #2
- vtrn.32 q8, q5
- add lr, r0, lr, lsl #2
- vswp d9,d10
- vst1.32 {d8,d9,d10,d11}, [lr, :128]!
- vld2.32 {q13}, [r10, :128]! @tag7
- vld2.32 {q15}, [r9, :128]! @tag6
- vld2.32 {q11}, [r8, :128]! @tag5
- vsub.f32 q14, q15, q13
- vsub.f32 q12, q0, q11
- vadd.f32 q11, q0, q11
- vadd.f32 q13, q15, q13
- vadd.f32 d13, d29, d24 @
- vadd.f32 q15, q13, q11
- vsub.f32 d12, d28, d25 @
- vsub.f32 d15, d29, d24 @
- vadd.f32 d14, d28, d25 @
- vtrn.32 q15, q6
- vsub.f32 q15, q13, q11
- vtrn.32 q15, q7
- vswp d13, d14
- vst1.32 {d12,d13,d14,d15}, [lr, :128]!
- vtrn.32 q13, q14
- vtrn.32 q11, q12
- vmul.f32 d24, d26, d21
- vmul.f32 d28, d27, d20
- vmul.f32 d25, d26, d20
- vmul.f32 d26, d27, d21
- vmul.f32 d27, d22, d21
- vmul.f32 d30, d23, d20
- vmul.f32 d29, d23, d21
- vmul.f32 d22, d22, d20
- vsub.f32 d21, d28, d24
- vadd.f32 d20, d26, d25
- vadd.f32 d25, d30, d27
- vsub.f32 d24, d22, d29
- vadd.f32 q11, q12, q10
- vsub.f32 q10, q12, q10
- vadd.f32 q0, q9, q11
- vsub.f32 q2, q9, q11
- vadd.f32 d3, d17, d20 @
- vsub.f32 d7, d17, d20 @
- vsub.f32 d2, d16, d21 @
- vadd.f32 d6, d16, d21 @
- vswp d1, d2
- vswp d5, d6
- vstmia r2!, {q0-q3}
-
- add r2, r7, #0
- add r7, r9, #0
- add r9, r2, #0
- add r2, r8, #0
- add r8, r10, #0
- add r10, r2, #0
- ldr r11, [r1, #32] @ this is p->i1
- cmp r11, #0
- beq _neon_oo_loop_exit
-_neon_oo_loop:
- vld2.32 {q8}, [r6, :128]!
- vld2.32 {q9}, [r5, :128]!
- vld2.32 {q10}, [r4, :128]!
- vld2.32 {q13}, [r3, :128]!
- vadd.f32 q11, q9, q8
- vsub.f32 q8, q9, q8
- vsub.f32 q9, q13, q10
- vadd.f32 q12, q13, q10
- subs r11, r11, #1
- vld2.32 {q10}, [r7, :128]!
- vld2.32 {q13}, [r9, :128]!
- vsub.f32 q2, q12, q11
- vsub.f32 d7, d19, d16 @
- vadd.f32 d3, d19, d16 @
- vadd.f32 d6, d18, d17 @
- vsub.f32 d2, d18, d17 @
- vld2.32 {q9}, [r8, :128]!
- vld2.32 {q8}, [r10, :128]!
- vadd.f32 q0, q12, q11
- vadd.f32 q11, q13, q8
- vadd.f32 q12, q10, q9
- vsub.f32 q8, q13, q8
- vsub.f32 q9, q10, q9
- vsub.f32 q6, q12, q11
- vadd.f32 q4, q12, q11
- vtrn.32 q0, q2
- ldr r2, [r12], #4
- vsub.f32 d15, d19, d16 @
- ldr lr, [r12], #4
- vadd.f32 d11, d19, d16 @
- vadd.f32 d14, d18, d17 @
- vsub.f32 d10, d18, d17 @
- add r2, r0, r2, lsl #2
- vtrn.32 q1, q3
- add lr, r0, lr, lsl #2
- vst2.32 {q0,q1}, [r2, :128]!
- vst2.32 {q2,q3}, [lr, :128]!
- vtrn.32 q4, q6
- vtrn.32 q5, q7
- vst2.32 {q4,q5}, [r2, :128]!
- vst2.32 {q6,q7}, [lr, :128]!
- bne _neon_oo_loop
-_neon_oo_loop_exit:
-
- add r2, r3, #0
- add r3, r7, #0
- add r7, r2, #0
- add r2, r4, #0
- add r4, r8, #0
- add r8, r2, #0
- add r2, r5, #0
- add r5, r9, #0
- add r9, r2, #0
- add r2, r6, #0
- add r6, r10, #0
- add r10, r2, #0
- add r2, r9, #0
- add r9, r10, #0
- add r10, r2, #0
- ldr r2, [r1, #16]
- ldr r11, [r1, #32] @ this is p->i1
- cmp r11, #0
- beq _neon_ee_loop2_exit
-
- vld1.32 {d16, d17}, [r2, :128]
-_neon_ee_loop2:
- vld2.32 {q15}, [r10, :128]!
- vld2.32 {q13}, [r8, :128]!
- vld2.32 {q14}, [r7, :128]!
- vld2.32 {q9}, [r4, :128]!
- vld2.32 {q10}, [r3, :128]!
- vld2.32 {q11}, [r6, :128]!
- vld2.32 {q12}, [r5, :128]!
- vsub.f32 q1, q14, q13
- vld2.32 {q0}, [r9, :128]!
- subs r11, r11, #1
- vsub.f32 q2, q0, q15
- vadd.f32 q0, q0, q15
- vmul.f32 d10, d2, d17
- vmul.f32 d11, d3, d16
- vmul.f32 d12, d3, d17
- vmul.f32 d6, d4, d17
- vmul.f32 d7, d5, d16
- vmul.f32 d8, d4, d16
- vmul.f32 d9, d5, d17
- vmul.f32 d13, d2, d16
- vsub.f32 d7, d7, d6
- vadd.f32 d11, d11, d10
- vsub.f32 q1, q12, q11
- vsub.f32 q2, q10, q9
- vadd.f32 d6, d9, d8
- vadd.f32 q4, q14, q13
- vadd.f32 q11, q12, q11
- vadd.f32 q12, q10, q9
- vsub.f32 d10, d13, d12
- vsub.f32 q7, q4, q0
- vsub.f32 q9, q12, q11
- vsub.f32 q13, q5, q3
- vadd.f32 d29, d5, d2 @
- vadd.f32 q5, q5, q3
- vadd.f32 q10, q4, q0
- vadd.f32 q11, q12, q11
- vsub.f32 d31, d5, d2 @
- vsub.f32 d28, d4, d3 @
- vadd.f32 d30, d4, d3 @
- vadd.f32 d5, d19, d14 @
- vadd.f32 d7, d31, d26 @
- vadd.f32 q1, q14, q5
- vadd.f32 q0, q11, q10
- vsub.f32 d6, d30, d27 @
- vsub.f32 d4, d18, d15 @
- vsub.f32 d13, d19, d14 @
- vadd.f32 d12, d18, d15 @
- vsub.f32 d15, d31, d26 @
- ldr r2, [r12], #4
- vtrn.32 q1, q3
- ldr lr, [r12], #4
- vtrn.32 q0, q2
- add r2, r0, r2, lsl #2
- vsub.f32 q4, q11, q10
- add lr, r0, lr, lsl #2
- vsub.f32 q5, q14, q5
- vadd.f32 d14, d30, d27 @
- vst2.32 {q0,q1}, [r2, :128]!
- vst2.32 {q2,q3}, [lr, :128]!
- vtrn.32 q4, q6
- vtrn.32 q5, q7
- vst2.32 {q4,q5}, [r2, :128]!
- vst2.32 {q6,q7}, [lr, :128]!
- bne _neon_ee_loop2
-_neon_ee_loop2_exit:
-
- vldmia sp!, {d8-d15}
- pop {r4, r5, r6, r7, r8, r9, r10, r11, pc}
-
-
-
-
- .align 4
-#ifdef __APPLE__
- .globl _neon_static_o_i
-_neon_static_o_i:
-#else
- .globl neon_static_o_i
-neon_static_o_i:
-#endif
- push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
- vstmdb sp!, {d8-d15}
- ldr lr, [r0, #40] @ this is p->N
- add r3, r1, #0
- add r7, r1, lr
- add r5, r7, lr
- add r10, r5, lr
- add r4, r10, lr
- add r8, r4, lr
- add r6, r8, lr
- add r9, r6, lr
- ldr r12, [r0]
- add r1, r0, #0
- add r0, r2, #0
- ldr r2, [r1, #16] @ this is p->ee_ws
- ldr r11, [r1, #28] @ this is p->i0
-
- vld1.32 {d16, d17}, [r2, :128]
-_neon_ee_o_loop:
- vld2.32 {q15}, [r10, :128]!
- vld2.32 {q13}, [r8, :128]!
- vld2.32 {q14}, [r7, :128]!
- vld2.32 {q9}, [r4, :128]!
- vld2.32 {q10}, [r3, :128]!
- vld2.32 {q11}, [r6, :128]!
- vld2.32 {q12}, [r5, :128]!
- vsub.f32 q1, q14, q13
- vld2.32 {q0}, [r9, :128]!
- subs r11, r11, #1
- vsub.f32 q2, q0, q15
- vadd.f32 q0, q0, q15
- vmul.f32 d10, d2, d17
- vmul.f32 d11, d3, d16
- vmul.f32 d12, d3, d17
- vmul.f32 d6, d4, d17
- vmul.f32 d7, d5, d16
- vmul.f32 d8, d4, d16
- vmul.f32 d9, d5, d17
- vmul.f32 d13, d2, d16
- vsub.f32 d7, d7, d6
- vadd.f32 d11, d11, d10
- vsub.f32 q1, q12, q11
- vsub.f32 q2, q10, q9
- vadd.f32 d6, d9, d8
- vadd.f32 q4, q14, q13
- vadd.f32 q11, q12, q11
- vadd.f32 q12, q10, q9
- vsub.f32 d10, d13, d12
- vsub.f32 q7, q4, q0
- vsub.f32 q9, q12, q11
- vsub.f32 q13, q5, q3
- vadd.f32 d29, d5, d2 @
- vadd.f32 q5, q5, q3
- vadd.f32 q10, q4, q0
- vadd.f32 q11, q12, q11
- vsub.f32 d31, d5, d2 @
- vsub.f32 d28, d4, d3 @
- vadd.f32 d30, d4, d3 @
- vadd.f32 d5, d19, d14 @
- vadd.f32 d7, d31, d26 @
- vadd.f32 q1, q14, q5
- vadd.f32 q0, q11, q10
- vsub.f32 d6, d30, d27 @
- vsub.f32 d4, d18, d15 @
- vsub.f32 d13, d19, d14 @
- vadd.f32 d12, d18, d15 @
- vsub.f32 d15, d31, d26 @
- ldr r2, [r12], #4
- vtrn.32 q1, q3
- ldr lr, [r12], #4
- vtrn.32 q0, q2
- add r2, r0, r2, lsl #2
- vsub.f32 q4, q11, q10
- add lr, r0, lr, lsl #2
- vsub.f32 q5, q14, q5
- vadd.f32 d14, d30, d27 @
- vst2.32 {q0,q1}, [r2, :128]!
- vst2.32 {q2,q3}, [lr, :128]!
- vtrn.32 q4, q6
- vtrn.32 q5, q7
- vst2.32 {q4,q5}, [r2, :128]!
- vst2.32 {q6,q7}, [lr, :128]!
- bne _neon_ee_o_loop
-
- add r2, r7, #0
- add r7, r9, #0
- add r9, r2, #0
- add r2, r8, #0
- add r8, r10, #0
- add r10, r2, #0
- ldr r11, [r1, #32] @ this is p->i1
- cmp r11, #0
- beq _neon_oo_o_loop_exit
-_neon_oo_o_loop:
- vld2.32 {q8}, [r6, :128]!
- vld2.32 {q9}, [r5, :128]!
- vld2.32 {q10}, [r4, :128]!
- vld2.32 {q13}, [r3, :128]!
- vadd.f32 q11, q9, q8
- vsub.f32 q8, q9, q8
- vsub.f32 q9, q13, q10
- vadd.f32 q12, q13, q10
- subs r11, r11, #1
- vld2.32 {q10}, [r7, :128]!
- vld2.32 {q13}, [r9, :128]!
- vsub.f32 q2, q12, q11
- vsub.f32 d7, d19, d16 @
- vadd.f32 d3, d19, d16 @
- vadd.f32 d6, d18, d17 @
- vsub.f32 d2, d18, d17 @
- vld2.32 {q9}, [r8, :128]!
- vld2.32 {q8}, [r10, :128]!
- vadd.f32 q0, q12, q11
- vadd.f32 q11, q13, q8
- vadd.f32 q12, q10, q9
- vsub.f32 q8, q13, q8
- vsub.f32 q9, q10, q9
- vsub.f32 q6, q12, q11
- vadd.f32 q4, q12, q11
- vtrn.32 q0, q2
- ldr r2, [r12], #4
- vsub.f32 d15, d19, d16 @
- ldr lr, [r12], #4
- vadd.f32 d11, d19, d16 @
- vadd.f32 d14, d18, d17 @
- vsub.f32 d10, d18, d17 @
- add r2, r0, r2, lsl #2
- vtrn.32 q1, q3
- add lr, r0, lr, lsl #2
- vst2.32 {q0,q1}, [r2, :128]!
- vst2.32 {q2,q3}, [lr, :128]!
- vtrn.32 q4, q6
- vtrn.32 q5, q7
- vst2.32 {q4,q5}, [r2, :128]!
- vst2.32 {q6,q7}, [lr, :128]!
- bne _neon_oo_o_loop
-_neon_oo_o_loop_exit:
-
- ldr r11, [r1, #8]
- vld1.32 {q8}, [r5, :128]!
- vld1.32 {q10}, [r6, :128]!
- vld2.32 {q11}, [r4, :128]!
- vld2.32 {q13}, [r3, :128]!
- vld2.32 {q15}, [r10, :128]!
- vorr d25, d17, d17
- vorr d24, d20, d20
- vorr d20, d16, d16
- vsub.f32 q9, q13, q11
- vadd.f32 q11, q13, q11
- ldr r2, [r12], #4
- vtrn.32 d24, d25
- ldr lr, [r12], #4
- vtrn.32 d20, d21
- add r2, r0, r2, lsl #2
- vsub.f32 q8, q10, q12
- add lr, r0, lr, lsl #2
- vadd.f32 q10, q10, q12
- vadd.f32 q0, q11, q10
- vadd.f32 d25, d19, d16 @
- vsub.f32 d27, d19, d16 @
- vsub.f32 q1, q11, q10
- vsub.f32 d24, d18, d17 @
- vadd.f32 d26, d18, d17 @
- vtrn.32 q0, q12
- vtrn.32 q1, q13
- vld1.32 {d24, d25}, [r11, :128]
- vswp d1, d2
- vst1.32 {q0, q1}, [r2, :128]!
- vld2.32 {q0}, [r9, :128]!
- vadd.f32 q1, q0, q15
- vld2.32 {q13}, [r8, :128]!
- vld2.32 {q14}, [r7, :128]!
- vsub.f32 q15, q0, q15
- vsub.f32 q0, q14, q13
- vadd.f32 q3, q14, q13
- vadd.f32 q2, q3, q1
- vadd.f32 d29, d1, d30 @
- vsub.f32 d27, d1, d30 @
- vsub.f32 q3, q3, q1
- vsub.f32 d28, d0, d31 @
- vadd.f32 d26, d0, d31 @
- vtrn.32 q2, q14
- vtrn.32 q3, q13
- vswp d5, d6
- vst1.32 {q2, q3}, [r2, :128]!
- vtrn.32 q11, q9
- vtrn.32 q10, q8
- vmul.f32 d20, d18, d25
- vmul.f32 d22, d19, d24
- vmul.f32 d21, d19, d25
- vmul.f32 d18, d18, d24
- vmul.f32 d19, d16, d25
- vmul.f32 d30, d17, d24
- vmul.f32 d23, d16, d24
- vmul.f32 d24, d17, d25
- vadd.f32 d17, d22, d20
- vsub.f32 d16, d18, d21
- vsub.f32 d21, d30, d19
- vadd.f32 d20, d24, d23
- vadd.f32 q9, q8, q10
- vsub.f32 q8, q8, q10
- vadd.f32 q4, q14, q9
- vsub.f32 q6, q14, q9
- vadd.f32 d11, d27, d16 @
- vsub.f32 d15, d27, d16 @
- vsub.f32 d10, d26, d17 @
- vadd.f32 d14, d26, d17 @
- vswp d9, d10
- vswp d13, d14
- vstmia lr!, {q4-q7}
-
-
- add r2, r3, #0
- add r3, r7, #0
- add r7, r2, #0
- add r2, r4, #0
- add r4, r8, #0
- add r8, r2, #0
- add r2, r5, #0
- add r5, r9, #0
- add r9, r2, #0
- add r2, r6, #0
- add r6, r10, #0
- add r10, r2, #0
- add r2, r9, #0
- add r9, r10, #0
- add r10, r2, #0
- ldr r2, [r1, #16]
- ldr r11, [r1, #32] @ this is p->i1
- cmp r11, #0
- beq _neon_ee_o_loop2_exit
-
- vld1.32 {d16, d17}, [r2, :128]
-_neon_ee_o_loop2:
- vld2.32 {q15}, [r10, :128]!
- vld2.32 {q13}, [r8, :128]!
- vld2.32 {q14}, [r7, :128]!
- vld2.32 {q9}, [r4, :128]!
- vld2.32 {q10}, [r3, :128]!
- vld2.32 {q11}, [r6, :128]!
- vld2.32 {q12}, [r5, :128]!
- vsub.f32 q1, q14, q13
- vld2.32 {q0}, [r9, :128]!
- subs r11, r11, #1
- vsub.f32 q2, q0, q15
- vadd.f32 q0, q0, q15
- vmul.f32 d10, d2, d17
- vmul.f32 d11, d3, d16
- vmul.f32 d12, d3, d17
- vmul.f32 d6, d4, d17
- vmul.f32 d7, d5, d16
- vmul.f32 d8, d4, d16
- vmul.f32 d9, d5, d17
- vmul.f32 d13, d2, d16
- vsub.f32 d7, d7, d6
- vadd.f32 d11, d11, d10
- vsub.f32 q1, q12, q11
- vsub.f32 q2, q10, q9
- vadd.f32 d6, d9, d8
- vadd.f32 q4, q14, q13
- vadd.f32 q11, q12, q11
- vadd.f32 q12, q10, q9
- vsub.f32 d10, d13, d12
- vsub.f32 q7, q4, q0
- vsub.f32 q9, q12, q11
- vsub.f32 q13, q5, q3
- vadd.f32 d29, d5, d2 @
- vadd.f32 q5, q5, q3
- vadd.f32 q10, q4, q0
- vadd.f32 q11, q12, q11
- vsub.f32 d31, d5, d2 @
- vsub.f32 d28, d4, d3 @
- vadd.f32 d30, d4, d3 @
- vadd.f32 d5, d19, d14 @
- vadd.f32 d7, d31, d26 @
- vadd.f32 q1, q14, q5
- vadd.f32 q0, q11, q10
- vsub.f32 d6, d30, d27 @
- vsub.f32 d4, d18, d15 @
- vsub.f32 d13, d19, d14 @
- vadd.f32 d12, d18, d15 @
- vsub.f32 d15, d31, d26 @
- ldr r2, [r12], #4
- vtrn.32 q1, q3
- ldr lr, [r12], #4
- vtrn.32 q0, q2
- add r2, r0, r2, lsl #2
- vsub.f32 q4, q11, q10
- add lr, r0, lr, lsl #2
- vsub.f32 q5, q14, q5
- vadd.f32 d14, d30, d27 @
- vst2.32 {q0,q1}, [r2, :128]!
- vst2.32 {q2,q3}, [lr, :128]!
- vtrn.32 q4, q6
- vtrn.32 q5, q7
- vst2.32 {q4,q5}, [r2, :128]!
- vst2.32 {q6,q7}, [lr, :128]!
- bne _neon_ee_o_loop2
-_neon_ee_o_loop2_exit:
-
- vldmia sp!, {d8-d15}
- pop {r4, r5, r6, r7, r8, r9, r10, r11, pc}
-
- .align 4
-#ifdef __APPLE__
- .globl _neon_static_x4_i
-_neon_static_x4_i:
-#else
- .globl neon_static_x4_i
-neon_static_x4_i:
-#endif
-@ add r3, r0, #0
- push {r4, r5, r6, lr}
- vstmdb sp!, {d8-d15}
-
- vld1.32 {q8,q9}, [r0, :128]
- add r4, r0, r1, lsl #1
- vld1.32 {q10,q11}, [r4, :128]
- add r5, r0, r1, lsl #2
- vld1.32 {q12,q13}, [r5, :128]
- add r6, r4, r1, lsl #2
- vld1.32 {q14,q15}, [r6, :128]
- vld1.32 {q2,q3}, [r2, :128]
-
- vmul.f32 q0, q13, q3
- vmul.f32 q5, q12, q2
- vmul.f32 q1, q14, q2
- vmul.f32 q4, q14, q3
- vmul.f32 q14, q12, q3
- vmul.f32 q13, q13, q2
- vmul.f32 q12, q15, q3
- vmul.f32 q2, q15, q2
- vsub.f32 q0, q5, q0
- vadd.f32 q13, q13, q14
- vadd.f32 q12, q12, q1
- vsub.f32 q1, q2, q4
- vadd.f32 q15, q0, q12
- vsub.f32 q12, q0, q12
- vadd.f32 q14, q13, q1
- vsub.f32 q13, q13, q1
- vadd.f32 q0, q8, q15
- vadd.f32 q1, q9, q14
- vsub.f32 q2, q10, q13 @
- vsub.f32 q4, q8, q15
- vadd.f32 q3, q11, q12 @
- vst1.32 {q0,q1}, [r0, :128]
- vsub.f32 q5, q9, q14
- vadd.f32 q6, q10, q13 @
- vsub.f32 q7, q11, q12 @
- vst1.32 {q2,q3}, [r4, :128]
- vst1.32 {q4,q5}, [r5, :128]
- vst1.32 {q6,q7}, [r6, :128]
- vldmia sp!, {d8-d15}
- pop {r4, r5, r6, pc}
-
-
-
- .align 4
-#ifdef __APPLE__
- .globl _neon_static_x8_i
-_neon_static_x8_i:
-#else
- .globl neon_static_x8_i
-neon_static_x8_i:
-#endif
- push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
- vstmdb sp!, {d8-d15}
- mov r11, #0
- add r3, r0, #0 @ data0
- add r5, r0, r1, lsl #1 @ data2
- add r4, r0, r1 @ data1
- add r7, r5, r1, lsl #1 @ data4
- add r6, r5, r1 @ data3
- add r9, r7, r1, lsl #1 @ data6
- add r8, r7, r1 @ data5
- add r10, r9, r1 @ data7
- add r12, r2, #0 @ LUT
-
- sub r11, r11, r1, lsr #5
-neon_x8_loop:
- vld1.32 {q2,q3}, [r12, :128]!
- vld1.32 {q14,q15}, [r6, :128]
- vld1.32 {q10,q11}, [r5, :128]
- adds r11, r11, #1
- vmul.f32 q12, q15, q2
- vmul.f32 q8, q14, q3
- vmul.f32 q13, q14, q2
- vmul.f32 q9, q10, q3
- vmul.f32 q1, q10, q2
- vmul.f32 q0, q11, q2
- vmul.f32 q14, q11, q3
- vmul.f32 q15, q15, q3
- vld1.32 {q2,q3}, [r12, :128]!
- vsub.f32 q10, q12, q8
- vadd.f32 q11, q0, q9
- vadd.f32 q8, q15, q13
- vld1.32 {q12,q13}, [r4, :128]
- vsub.f32 q9, q1, q14
- vsub.f32 q15, q11, q10
- vsub.f32 q14, q9, q8
- vsub.f32 q4, q12, q15 @
- vadd.f32 q6, q12, q15 @
- vadd.f32 q5, q13, q14 @
- vsub.f32 q7, q13, q14 @
- vld1.32 {q14,q15}, [r9, :128]
- vld1.32 {q12,q13}, [r7, :128]
- vmul.f32 q1, q14, q2
- vmul.f32 q0, q14, q3
- vst1.32 {q4,q5}, [r4, :128]
- vmul.f32 q14, q15, q3
- vmul.f32 q4, q15, q2
- vadd.f32 q15, q9, q8
- vst1.32 {q6,q7}, [r6, :128]
- vmul.f32 q8, q12, q3
- vmul.f32 q5, q13, q3
- vmul.f32 q12, q12, q2
- vmul.f32 q9, q13, q2
- vadd.f32 q14, q14, q1
- vsub.f32 q13, q4, q0
- vadd.f32 q0, q9, q8
- vld1.32 {q8,q9}, [r3, :128]
- vadd.f32 q1, q11, q10
- vsub.f32 q12, q12, q5
- vadd.f32 q11, q8, q15
- vsub.f32 q8, q8, q15
- vadd.f32 q2, q12, q14
- vsub.f32 q10, q0, q13
- vadd.f32 q15, q0, q13
- vadd.f32 q13, q9, q1
- vsub.f32 q9, q9, q1
- vsub.f32 q12, q12, q14
- vadd.f32 q0, q11, q2
- vadd.f32 q1, q13, q15
- vsub.f32 q4, q11, q2
- vsub.f32 q2, q8, q10 @
- vadd.f32 q3, q9, q12 @
- vst1.32 {q0,q1}, [r3, :128]!
- vsub.f32 q5, q13, q15
- vld1.32 {q14,q15}, [r10, :128]
- vsub.f32 q7, q9, q12 @
- vld1.32 {q12,q13}, [r8, :128]
- vst1.32 {q2,q3}, [r5, :128]!
- vld1.32 {q2,q3}, [r12, :128]!
- vadd.f32 q6, q8, q10 @
- vmul.f32 q8, q14, q2
- vst1.32 {q4,q5}, [r7, :128]!
- vmul.f32 q10, q15, q3
- vmul.f32 q9, q13, q3
- vmul.f32 q11, q12, q2
- vmul.f32 q14, q14, q3
- vst1.32 {q6,q7}, [r9, :128]!
- vmul.f32 q15, q15, q2
- vmul.f32 q12, q12, q3
- vmul.f32 q13, q13, q2
- vadd.f32 q10, q10, q8
- vsub.f32 q11, q11, q9
- vld1.32 {q8,q9}, [r4, :128]
- vsub.f32 q14, q15, q14
- vadd.f32 q15, q13, q12
- vadd.f32 q13, q11, q10
- vadd.f32 q12, q15, q14
- vsub.f32 q15, q15, q14
- vsub.f32 q14, q11, q10
- vld1.32 {q10,q11}, [r6, :128]
- vadd.f32 q0, q8, q13
- vadd.f32 q1, q9, q12
- vsub.f32 q2, q10, q15 @
- vadd.f32 q3, q11, q14 @
- vsub.f32 q4, q8, q13
- vst1.32 {q0,q1}, [r4, :128]!
- vsub.f32 q5, q9, q12
- vadd.f32 q6, q10, q15 @
- vst1.32 {q2,q3}, [r6, :128]!
- vsub.f32 q7, q11, q14 @
- vst1.32 {q4,q5}, [r8, :128]!
- vst1.32 {q6,q7}, [r10, :128]!
- bne neon_x8_loop
-
- vldmia sp!, {d8-d15}
- pop {r4, r5, r6, r7, r8, r9, r10, r11, pc}
-
- .align 4
-#ifdef __APPLE__
- .globl _neon_static_x8_t_i
-_neon_static_x8_t_i:
-#else
- .globl neon_static_x8_t_i
-neon_static_x8_t_i:
-#endif
- push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
- vstmdb sp!, {d8-d15}
- mov r11, #0
- add r3, r0, #0 @ data0
- add r5, r0, r1, lsl #1 @ data2
- add r4, r0, r1 @ data1
- add r7, r5, r1, lsl #1 @ data4
- add r6, r5, r1 @ data3
- add r9, r7, r1, lsl #1 @ data6
- add r8, r7, r1 @ data5
- add r10, r9, r1 @ data7
- add r12, r2, #0 @ LUT
-
- sub r11, r11, r1, lsr #5
-neon_x8_t_loop:
- vld1.32 {q2,q3}, [r12, :128]!
- vld1.32 {q14,q15}, [r6, :128]
- vld1.32 {q10,q11}, [r5, :128]
- adds r11, r11, #1
- vmul.f32 q12, q15, q2
- vmul.f32 q8, q14, q3
- vmul.f32 q13, q14, q2
- vmul.f32 q9, q10, q3
- vmul.f32 q1, q10, q2
- vmul.f32 q0, q11, q2
- vmul.f32 q14, q11, q3
- vmul.f32 q15, q15, q3
- vld1.32 {q2,q3}, [r12, :128]!
- vsub.f32 q10, q12, q8
- vadd.f32 q11, q0, q9
- vadd.f32 q8, q15, q13
- vld1.32 {q12,q13}, [r4, :128]
- vsub.f32 q9, q1, q14
- vsub.f32 q15, q11, q10
- vsub.f32 q14, q9, q8
- vsub.f32 q4, q12, q15 @
- vadd.f32 q6, q12, q15 @
- vadd.f32 q5, q13, q14 @
- vsub.f32 q7, q13, q14 @
- vld1.32 {q14,q15}, [r9, :128]
- vld1.32 {q12,q13}, [r7, :128]
- vmul.f32 q1, q14, q2
- vmul.f32 q0, q14, q3
- vst1.32 {q4,q5}, [r4, :128]
- vmul.f32 q14, q15, q3
- vmul.f32 q4, q15, q2
- vadd.f32 q15, q9, q8
- vst1.32 {q6,q7}, [r6, :128]
- vmul.f32 q8, q12, q3
- vmul.f32 q5, q13, q3
- vmul.f32 q12, q12, q2
- vmul.f32 q9, q13, q2
- vadd.f32 q14, q14, q1
- vsub.f32 q13, q4, q0
- vadd.f32 q0, q9, q8
- vld1.32 {q8,q9}, [r3, :128]
- vadd.f32 q1, q11, q10
- vsub.f32 q12, q12, q5
- vadd.f32 q11, q8, q15
- vsub.f32 q8, q8, q15
- vadd.f32 q2, q12, q14
- vsub.f32 q10, q0, q13
- vadd.f32 q15, q0, q13
- vadd.f32 q13, q9, q1
- vsub.f32 q9, q9, q1
- vsub.f32 q12, q12, q14
- vadd.f32 q0, q11, q2
- vadd.f32 q1, q13, q15
- vsub.f32 q4, q11, q2
- vsub.f32 q2, q8, q10 @
- vadd.f32 q3, q9, q12 @
- vst2.32 {q0,q1}, [r3, :128]!
- vsub.f32 q5, q13, q15
- vld1.32 {q14,q15}, [r10, :128]
- vsub.f32 q7, q9, q12 @
- vld1.32 {q12,q13}, [r8, :128]
- vst2.32 {q2,q3}, [r5, :128]!
- vld1.32 {q2,q3}, [r12, :128]!
- vadd.f32 q6, q8, q10 @
- vmul.f32 q8, q14, q2
- vst2.32 {q4,q5}, [r7, :128]!
- vmul.f32 q10, q15, q3
- vmul.f32 q9, q13, q3
- vmul.f32 q11, q12, q2
- vmul.f32 q14, q14, q3
- vst2.32 {q6,q7}, [r9, :128]!
- vmul.f32 q15, q15, q2
- vmul.f32 q12, q12, q3
- vmul.f32 q13, q13, q2
- vadd.f32 q10, q10, q8
- vsub.f32 q11, q11, q9
- vld1.32 {q8,q9}, [r4, :128]
- vsub.f32 q14, q15, q14
- vadd.f32 q15, q13, q12
- vadd.f32 q13, q11, q10
- vadd.f32 q12, q15, q14
- vsub.f32 q15, q15, q14
- vsub.f32 q14, q11, q10
- vld1.32 {q10,q11}, [r6, :128]
- vadd.f32 q0, q8, q13
- vadd.f32 q1, q9, q12
- vsub.f32 q2, q10, q15 @
- vadd.f32 q3, q11, q14 @
- vsub.f32 q4, q8, q13
- vst2.32 {q0,q1}, [r4, :128]!
- vsub.f32 q5, q9, q12
- vadd.f32 q6, q10, q15 @
- vst2.32 {q2,q3}, [r6, :128]!
- vsub.f32 q7, q11, q14 @
- vst2.32 {q4,q5}, [r8, :128]!
- vst2.32 {q6,q7}, [r10, :128]!
- bne neon_x8_t_loop
-
- vldmia sp!, {d8-d15}
- pop {r4, r5, r6, r7, r8, r9, r10, r11, pc}
-
-
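The block deleted above is the hand-scheduled NEON backend: neon_static_x4_i performs one radix-4 butterfly over four rows spaced 2*r1 bytes apart with twiddles loaded from r2, while neon_static_x8_i and neon_static_x8_t_i are the radix-8 equivalents (the _t variant stores through vst2.32 to interleave its outputs). As a reading aid, here is a scalar C sketch of the dataflow one x4 butterfly implements; the function name, the single twiddle pair, and the sign of the I rotation (which flips between the forward and inverse kernels) are illustrative assumptions, not the FFTS API.

    /* Hedged sketch: scalar radix-4 butterfly corresponding to what
     * neon_static_x4_i computes four complex lanes at a time.  Not a
     * line-for-line port of the assembly above. */
    #include <complex.h>

    static void x4_butterfly(float complex *d0, float complex *d1,
                             float complex *d2, float complex *d3,
                             const float complex w[2], int count)
    {
        for (int i = 0; i < count; i++) {
            float complex t2   = d2[i] * w[0];   /* twiddled third row  */
            float complex t3   = d3[i] * w[1];   /* twiddled fourth row */
            float complex sum  = t2 + t3;
            float complex diff = (t2 - t3) * I;  /* sign flips for inverse */
            float complex a = d0[i], b = d1[i];
            d0[i] = a + sum;
            d2[i] = a - sum;
            d1[i] = b + diff;
            d3[i] = b - diff;
        }
    }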
diff --git a/lib/ffts/src/patterns.c b/lib/ffts/src/patterns.c
deleted file mode 100644
index 93fe7f7..0000000
--- a/lib/ffts/src/patterns.c
+++ /dev/null
@@ -1,208 +0,0 @@
-/*
-
- This file is part of FFTS -- The Fastest Fourier Transform in the South
-
- Copyright (c) 2012, Anthony M. Blake <amb@anthonix.com>
- Copyright (c) 2012, The University of Waikato
-
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions are met:
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in the
- documentation and/or other materials provided with the distribution.
- * Neither the name of the organization nor the
- names of its contributors may be used to endorse or promote products
- derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
- ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- DISCLAIMED. IN NO EVENT SHALL ANTHONY M. BLAKE BE LIABLE FOR ANY
- DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-*/
-
-#include "patterns.h"
-
-void permute_addr(int N, int offset, int stride, int *d) {
- int i, a[4] = {0,2,1,3};
- for(i=0;i<4;i++) {
- d[i] = offset + (a[i] << stride);
- if(d[i] < 0) d[i] += N;
- }
-}
-
-void ffts_hardcodedleaf_is_rec(ptrdiff_t **is, int bigN, int N, int poffset, int offset, int stride, int even, int VL) {
-
- if(N > 4) {
- ffts_hardcodedleaf_is_rec(is, bigN, N/2, poffset, offset, stride + 1, even, VL);
- if(N/4 >= 4) ffts_hardcodedleaf_is_rec(is, bigN, N/4, poffset+(1<<stride),offset+(N/2), stride + 2, 0, VL);
- if(N/4 >= 4) ffts_hardcodedleaf_is_rec(is, bigN, N/4, poffset-(1<<stride),offset+(3*N/4), stride + 2, 0, VL);
- else {
- int temp = poffset+(1<<stride);
- if(temp < 0) temp += bigN;
- temp *= 2;
-
- if(!(temp % (VL*2))) {
- (*is)[0] = poffset+(1<<stride);
- (*is)[1] = poffset+(1<<stride)+(1<<(stride+2));
- (*is)[2] = poffset-(1<<stride);
- (*is)[3] = poffset-(1<<stride)+(1<<(stride+2));
- int i;
- for(i=0;i<4;i++) if((*is)[i] < 0) (*is)[i] += bigN;
- for(i=0;i<4;i++) (*is)[i] *= 2;
- *is += 4;
- }
- }
- }else if(N == 4) {
- int perm[4];
- permute_addr(bigN, poffset, stride, perm);
- if(!((perm[0]*2) % (VL*2))) {
- int i;
- for(i=0;i<4;i++) {
- (*is)[i] = perm[i] * 2;
- }
- *is += 4;
- }
- }
-}
-
-void ffts_init_is(ffts_plan_t *p, int N, int leafN, int VL) {
- int i, i0 = N/leafN/3+1, i1=N/leafN/3, i2 = N/leafN/3;
- int stride = log(N/leafN)/log(2);
-
- p->is = malloc(N/VL * sizeof(ptrdiff_t));
-
- ptrdiff_t *is = p->is;
-
- if((N/leafN) % 3 > 1) i1++;
-
- for(i=0;i<i0;i++) ffts_hardcodedleaf_is_rec(&is, N, leafN, i, 0, stride, 1, VL);
- for(i=i0;i<i0+i1;i++) {
- ffts_hardcodedleaf_is_rec(&is, N, leafN/2, i, 0, stride+1, 1, VL);
- ffts_hardcodedleaf_is_rec(&is, N, leafN/2, i-(1<<stride), 0, stride+1, 1, VL);
- }
- for(i=0-i2;i<0;i++) ffts_hardcodedleaf_is_rec(&is, N, leafN, i, 0, stride, 1, VL);
-
-
-//for(i=0;i<N/VL;i++) {
-// printf("%td ", p->is[i]);
-// if(i % 16 == 15) printf("\n");
-//}
-
- p->i0 = i0; p->i1 = i1;
-}
-/**
- *
- *
- */
-void ffts_elaborate_offsets(ptrdiff_t *offsets, int leafN, int N, int ioffset, int ooffset, int stride, int even) {
- if((even && N == leafN) || (!even && N <= leafN)) {
- offsets[2*(ooffset/leafN)] = ioffset*2;
- offsets[2*(ooffset/leafN)+1] = ooffset;
- }else if(N > 4) {
- ffts_elaborate_offsets(offsets, leafN, N/2, ioffset, ooffset, stride+1, even);
- ffts_elaborate_offsets(offsets, leafN, N/4, ioffset+(1<<stride), ooffset+N/2, stride+2, 0);
- if(N/4 >= leafN)
- ffts_elaborate_offsets(offsets, leafN, N/4, ioffset-(1<<stride), ooffset+3*N/4, stride+2, 0);
- }
-
-}
-
-int compare_offsets(const void *a, const void *b) {
- return ((ptrdiff_t *)a)[0] - ((ptrdiff_t *)b)[0];
-}
-
-uint32_t reverse_bits(uint32_t a, int n) {
- uint32_t x = 0;
-
- int i;
- for(i=0;i<n;i++) {
- if(a & (1 << i)) x |= 1 << (n-i-1);
- }
- return x;
-}
-
-
-void ffts_init_offsets(ffts_plan_t *p, int N, int leafN) {
-
- ptrdiff_t *offsets = malloc(2 * N/leafN * sizeof(ptrdiff_t));
-
- ffts_elaborate_offsets(offsets, leafN, N, 0, 0, 1, 1);
-
- size_t i;
- for(i=0;i<2*N/leafN;i+=2) {
- if(offsets[i] < 0) offsets[i] = N + offsets[i];
- }
-
- qsort(offsets, N/leafN, 2 * sizeof(ptrdiff_t), compare_offsets);
- //elaborate_is(p, N, 0, 0, 1);
- p->offsets = malloc(N/leafN * sizeof(ptrdiff_t));
- for(i=0;i<N/leafN;i++) {
- p->offsets[i] = offsets[i*2+1]*2;
- }
-//for(i=0;i<N/leafN;i++) {
-// printf("%4d %4d\n", p->offsets[i], reverse_bits(p->offsets[i], __builtin_ctzl(2*N)));
-//}
- free(offsets);
-}
-
-/*
-int tree_count(int N, int leafN, int offset) {
-
- if(N <= leafN) return 0;
- int count = 0;
- count += tree_count(N/4, leafN, offset);
- count += tree_count(N/8, leafN, offset + N/4);
- count += tree_count(N/8, leafN, offset + N/4 + N/8);
- count += tree_count(N/4, leafN, offset + N/2);
- count += tree_count(N/4, leafN, offset + 3*N/4);
-
- return 1 + count;
-}
-
-void elaborate_tree(transform_index_t **p, int N, int leafN, int offset) {
-
- if(N <= leafN) return;
- elaborate_tree(p, N/4, leafN, offset);
- elaborate_tree(p, N/8, leafN, offset + N/4);
- elaborate_tree(p, N/8, leafN, offset + N/4 + N/8);
- elaborate_tree(p, N/4, leafN, offset + N/2);
- elaborate_tree(p, N/4, leafN, offset + 3*N/4);
-
- (*p)[0] = N;
- (*p)[1] = offset*2;
-
- (*p)+=2;
-}
-
-void ffts_init_tree(ffts_plan_t *p, int N, int leafN) {
-
- int count = tree_count(N, leafN, 0) + 1;
- transform_index_t *ps = p->transforms = malloc(count * 2 * sizeof(transform_index_t));
-
-//printf("count = %d\n", count);
-
- elaborate_tree(&ps, N, leafN, 0);
- #ifdef __ARM_NEON__
- ps -= 2;
- #endif
- ps[0] = 0;
- ps[1] = 0;
-//int i;
-//for(i=0;i<count;i++) {
-// fprintf(stderr, "%lu %lu - %d\n", p->transforms[i*2], p->transforms[i*2+1],
-// __builtin_ctzl(p->transforms[i*2]) - 5);
-//}
-
-}
-*/
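Two notes on the file removed above. First, permute_addr() emits the four leaf input addresses in {0, 2, 1, 3} order, scaled by 1 << stride and wrapped into [0, N) when an offset goes negative. Second, in ffts_hardcodedleaf_is_rec() the else binds to the second if(N/4 >= 4) (C's dangling-else rule), so the four-entry fallback runs only when the odd-size recursions are skipped; the rewritten header below makes that structure explicit. A minimal, self-contained demo of the address permutation (the inputs are arbitrary illustrative values):

    /* Hedged demo of the deleted permute_addr() logic. */
    #include <stdio.h>

    static void permute_addr_demo(int N, int offset, int stride, int *d)
    {
        const int a[4] = { 0, 2, 1, 3 };
        for (int i = 0; i < 4; i++) {
            d[i] = offset + (a[i] << stride);
            if (d[i] < 0)
                d[i] += N;   /* wrap negative addresses into [0, N) */
        }
    }

    int main(void)
    {
        int d[4];
        permute_addr_demo(64, -1, 3, d);
        printf("%d %d %d %d\n", d[0], d[1], d[2], d[3]);  /* 63 15 7 23 */
        return 0;
    }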
diff --git a/lib/ffts/src/patterns.h b/lib/ffts/src/patterns.h
index 6e2d6bb..69bbe76 100644
--- a/lib/ffts/src/patterns.h
+++ b/lib/ffts/src/patterns.h
@@ -1,44 +1,520 @@
/*
-
- This file is part of FFTS -- The Fastest Fourier Transform in the South
-
- Copyright (c) 2012, Anthony M. Blake <amb@anthonix.com>
- Copyright (c) 2012, The University of Waikato
-
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions are met:
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in the
- documentation and/or other materials provided with the distribution.
- * Neither the name of the organization nor the
- names of its contributors may be used to endorse or promote products
- derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
- ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- DISCLAIMED. IN NO EVENT SHALL ANTHONY M. BLAKE BE LIABLE FOR ANY
- DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+This file is part of FFTS -- The Fastest Fourier Transform in the South
+
+Copyright (c) 2012, Anthony M. Blake <amb@anthonix.com>
+Copyright (c) 2012, The University of Waikato
+Copyright (c) 2015, Jukka Ojanen <jukka.ojanen@kolumbus.fi>
+
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+* Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+* Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in the
+documentation and/or other materials provided with the distribution.
+* Neither the name of the organization nor the
+names of its contributors may be used to endorse or promote products
+derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL ANTHONY M. BLAKE BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
+#ifndef FFTS_PATTERNS_H
+#define FFTS_PATTERNS_H
+
+#if defined (_MSC_VER) && (_MSC_VER >= 1020)
+#pragma once
+#endif
+
+#include <stddef.h>
+
+#ifdef HAVE_STDLIB_H
+#include <stdlib.h>
+#endif
+
+#ifndef LEAF_N
+#define LEAF_N 8
+#endif
+
+#if LEAF_N == 8
+static void
+ffts_elaborate_offsets_even8(ptrdiff_t *const offsets,
+ int log_N);
+
+static void
+ffts_elaborate_offsets_odd8(ptrdiff_t *const offsets,
+ int log_N,
+ int input_offset,
+ int output_offset,
+ int stride);
+
+static void
+ffts_hardcodedleaf_is_rec_even4(ptrdiff_t **is,
+ int big_N,
+ int offset,
+ int stride,
+ int VL);
+
+static void
+ffts_hardcodedleaf_is_rec_even8(ptrdiff_t **is,
+ int big_N,
+ int offset,
+ int stride,
+ int VL);
+#else
+static void
+ffts_elaborate_offsets_even(ptrdiff_t *const offsets,
+ int leaf_N,
+ int N,
+ int input_offset,
+ int output_offset,
+ int stride);
+
+static void
+ffts_elaborate_offsets_odd(ptrdiff_t *const offsets,
+ int leaf_N,
+ int N,
+ int input_offset,
+ int output_offset,
+ int stride);
+
+static void
+ffts_hardcodedleaf_is_rec_even(ptrdiff_t **is,
+ int big_N,
+ int N,
+ int offset,
+ int stride,
+ int VL);
+
+static void
+ffts_hardcodedleaf_is_rec_odd(ptrdiff_t **is,
+ int big_N,
+ int N,
+ int offset,
+ int stride,
+ int VL);
+#endif
+
+static int
+ffts_compare_offsets(const void *pa, const void *pb)
+{
+ const ptrdiff_t a = *(const ptrdiff_t*) pa;
+ const ptrdiff_t b = *(const ptrdiff_t*) pb;
+ return (a > b) - (a < b);
+}
+
+static void
+ffts_permute_addr(int N, int offset, int stride, int *const d)
+{
+ int a[4] = {0,2,1,3};
+ int i;
+
+ for (i = 0; i < 4; i++) {
+ d[i] = offset + (a[i] << stride);
+ if (d[i] < 0) {
+ d[i] += N;
+ }
+ }
+}
+
+#if LEAF_N == 8
+static void
+ffts_elaborate_offsets_even8(ptrdiff_t *const offsets, int log_N)
+{
+ int offset = 1 << (log_N - 4);
+ int stride = 1;
+
+ offsets[0] = 0;
+ offsets[1] = 0;
+ offsets[2] = offset * 2;
+ offsets[3] = 8;
+ offsets[4] = offset;
+ offsets[5] = 16;
+ offsets[6] = -offset;
+ offsets[7] = 24;
+
+ for(; log_N > 5; --log_N, stride *= 2) {
+ ffts_elaborate_offsets_odd8(offsets, log_N - 2,
+ stride, 1 << (log_N - 1), stride * 4);
+
+ ffts_elaborate_offsets_odd8(offsets, log_N - 2,
+ -stride, 3 * (1 << (log_N - 2)), stride * 4);
+ }
+}
+
+static void
+ffts_elaborate_offsets_odd8(ptrdiff_t *const offsets,
+ int log_N,
+ int input_offset,
+ int output_offset,
+ int stride)
+{
+ if (log_N <= 4) {
+ offsets[(output_offset / 4) + 0] = input_offset * 2;
+ offsets[(output_offset / 4) + 1] = output_offset;
+
+ if (log_N == 4) {
+ offsets[(output_offset / 4) + 2] = (input_offset + stride) * 2;
+ offsets[(output_offset / 4) + 3] = output_offset + 8;
+ }
+ } else {
+ ffts_elaborate_offsets_odd8(offsets, log_N - 1, input_offset,
+ output_offset, stride * 2);
+
+ ffts_elaborate_offsets_odd8(offsets, log_N - 2, input_offset + stride,
+ output_offset + (1 << (log_N - 1)), stride * 4);
+
+ ffts_elaborate_offsets_odd8(offsets, log_N - 2, input_offset - stride,
+ output_offset + 3 * (1 << (log_N - 2)), stride * 4);
+ }
+}
+
+static void
+ffts_hardcodedleaf_is_rec_even4(ptrdiff_t **is,
+ int big_N,
+ int offset,
+ int stride,
+ int VL)
+{
+ int i, perm[4];
+
+ ffts_permute_addr(big_N, offset, stride, perm);
+
+ if (!((2 * perm[0]) % (2 * VL))) {
+ for (i = 0; i < 4; i++) {
+ (*is)[i] = 2 * perm[i];
+ }
+
+ *is += 4;
+ }
+}
+
+static void
+ffts_hardcodedleaf_is_rec_even8(ptrdiff_t **is,
+ int big_N,
+ int offset,
+ int stride,
+ int VL)
+{
+ int temp;
+
+ ffts_hardcodedleaf_is_rec_even4(is, big_N, offset, stride + 1, VL);
+
+ temp = offset + (1 << stride);
+ if (temp < 0) {
+ temp += big_N;
+ }
+
+ temp *= 2;
+
+ if (!(temp % (2 * VL))) {
+ int i;
+
+ (*is)[0] = offset + (1 << stride);
+ (*is)[1] = offset + (1 << stride) + (1 << (stride + 2));
+ (*is)[2] = offset - (1 << stride);
+ (*is)[3] = offset - (1 << stride) + (1 << (stride + 2));
+
+ for (i = 0; i < 4; i++) {
+ if ((*is)[i] < 0) {
+ (*is)[i] += big_N;
+ }
+ }
+
+ for (i = 0; i < 4; i++) {
+ (*is)[i] *= 2;
+ }
+
+ *is += 4;
+ }
+}
+#else
+static void
+ffts_elaborate_offsets_even(ptrdiff_t *const offsets,
+ int leaf_N,
+ int N,
+ int input_offset,
+ int output_offset,
+ int stride)
+{
+ if (N == leaf_N) {
+ offsets[2 * (output_offset / leaf_N) + 0] = input_offset * 2;
+ offsets[2 * (output_offset / leaf_N) + 1] = output_offset;
+ } else if (N > 4) {
+ ffts_elaborate_offsets_even(offsets, leaf_N,
+ N/2, input_offset, output_offset, stride * 2);
+
+ ffts_elaborate_offsets_odd(offsets, leaf_N,
+ N/4, input_offset + stride, output_offset + N/2, stride * 4);
+
+ if (N/4 >= leaf_N) {
+ ffts_elaborate_offsets_odd(offsets, leaf_N,
+ N/4, input_offset - stride, output_offset + 3*N/4, stride * 4);
+ }
+ }
+}
-#ifndef __PATTERNS_H__
-#define __PATTERNS_H__
+static void
+ffts_elaborate_offsets_odd(ptrdiff_t *const offsets,
+ int leaf_N,
+ int N,
+ int input_offset,
+ int output_offset,
+ int stride)
+{
+ if (N <= leaf_N) {
+ offsets[2 * (output_offset / leaf_N) + 0] = input_offset * 2;
+ offsets[2 * (output_offset / leaf_N) + 1] = output_offset;
+ } else if (N > 4) {
+ ffts_elaborate_offsets_odd(offsets, leaf_N, N/2,
+ input_offset, output_offset, stride * 2);
-#include "ffts.h"
+ ffts_elaborate_offsets_odd(offsets, leaf_N, N/4,
+ input_offset + stride, output_offset + N/2, stride * 4);
-void ffts_init_is(ffts_plan_t *p, int N, int leafN, int VL);
-void ffts_init_offsets(ffts_plan_t *p, int N, int leafN);
-//void ffts_init_tree(ffts_plan_t *p, int N, int leafN);
+ if (N/4 >= leaf_N) {
+ ffts_elaborate_offsets_odd(offsets, leaf_N, N/4,
+ input_offset - stride, output_offset + 3*N/4, stride * 4);
+ }
+ }
+}
+static void
+ffts_hardcodedleaf_is_rec_even(ptrdiff_t **is,
+ int big_N,
+ int N,
+ int offset,
+ int stride,
+ int VL)
+{
+ if (N > 4) {
+ ffts_hardcodedleaf_is_rec_even(is, big_N, N/2, offset, stride + 1, VL);
+
+ if (N/4 >= 4) {
+ ffts_hardcodedleaf_is_rec_odd(
+ is, big_N, N/4, offset + (1 << stride), stride + 2, VL);
+ ffts_hardcodedleaf_is_rec_odd(
+ is, big_N, N/4, offset - (1 << stride), stride + 2, VL);
+ } else {
+ int temp = offset + (1 << stride);
+
+ if (temp < 0) {
+ temp += big_N;
+ }
+
+ temp *= 2;
+
+ if (!(temp % (2 * VL))) {
+ int i;
+
+ (*is)[0] = offset + (1 << stride);
+ (*is)[1] = offset + (1 << stride) + (1 << (stride + 2));
+ (*is)[2] = offset - (1 << stride);
+ (*is)[3] = offset - (1 << stride) + (1 << (stride + 2));
+
+ for (i = 0; i < 4; i++) {
+ if ((*is)[i] < 0) {
+ (*is)[i] += big_N;
+ }
+ }
+
+ for (i = 0; i < 4; i++) {
+ (*is)[i] *= 2;
+ }
+
+ *is += 4;
+ }
+ }
+ } else if (N == 4) {
+ int perm[4];
+
+ ffts_permute_addr(big_N, offset, stride, perm);
+
+ if (!((2 * perm[0]) % (2 * VL))) {
+ int i;
+
+ for (i = 0; i < 4; i++) {
+ (*is)[i] = 2 * perm[i];
+ }
+
+ *is += 4;
+ }
+ }
+}
+
+static void
+ffts_hardcodedleaf_is_rec_odd(ptrdiff_t **is,
+ int big_N,
+ int N,
+ int offset,
+ int stride,
+ int VL)
+{
+ if (N > 4) {
+ ffts_hardcodedleaf_is_rec_odd(is, big_N, N/2, offset, stride + 1, VL);
+
+ if (N/4 >= 4) {
+ ffts_hardcodedleaf_is_rec_odd(
+ is, big_N, N/4, offset + (1 << stride), stride + 2, VL);
+ ffts_hardcodedleaf_is_rec_odd(
+ is, big_N, N/4, offset - (1 << stride), stride + 2, VL);
+ } else {
+ int temp = offset + (1 << stride);
+
+ if (temp < 0) {
+ temp += big_N;
+ }
+
+ temp *= 2;
+
+ if (!(temp % (2 * VL))) {
+ int i;
+
+ (*is)[0] = offset + (1 << stride);
+ (*is)[1] = offset + (1 << stride) + (1 << (stride + 2));
+ (*is)[2] = offset - (1 << stride);
+ (*is)[3] = offset - (1 << stride) + (1 << (stride + 2));
+
+ for (i = 0; i < 4; i++) {
+ if ((*is)[i] < 0) {
+ (*is)[i] += big_N;
+ }
+ }
+
+ for (i = 0; i < 4; i++) {
+ (*is)[i] *= 2;
+ }
+
+ *is += 4;
+ }
+ }
+ } else if (N == 4) {
+ int perm[4];
+
+ ffts_permute_addr(big_N, offset, stride, perm);
+
+ if (!((2 * perm[0]) % (2 * VL))) {
+ int i;
+
+ for (i = 0; i < 4; i++) {
+ (*is)[i] = 2 * perm[i];
+ }
+
+ *is += 4;
+ }
+ }
+}
+#endif
+
+static ptrdiff_t*
+ffts_init_is(size_t N, size_t leaf_N, int VL)
+{
+ int i, i0, i1, i2;
+ int stride = ffts_ctzl(N/leaf_N);
+ ptrdiff_t *is, *pis;
+
+ is = malloc(N / VL * sizeof(*is));
+ if (!is) {
+ return NULL;
+ }
+
+ i0 = N/leaf_N/3 + 1;
+ i1 = i2 = N/leaf_N/3;
+ if ((N/leaf_N) % 3 > 1) {
+ i1++;
+ }
+
+ pis = is;
+
+#if LEAF_N == 8
+ for (i = 0; i < i0; i++) {
+ ffts_hardcodedleaf_is_rec_even8(
+ &pis, N, i, stride, VL);
+ }
+
+ for (i = i0; i < i0 + i1; i++) {
+ ffts_hardcodedleaf_is_rec_even4(
+ &pis, N, i, stride + 1, VL);
+ ffts_hardcodedleaf_is_rec_even4(
+ &pis, N, i - (1 << stride), stride + 1, VL);
+ }
+
+ for (i = 0 - i2; i < 0; i++) {
+ ffts_hardcodedleaf_is_rec_even8(
+ &pis, N, i, stride, VL);
+ }
+#else
+ for (i = 0; i < i0; i++) {
+ ffts_hardcodedleaf_is_rec_even(
+ &pis, N, leaf_N, i, stride, VL);
+ }
+
+ for (i = i0; i < i0 + i1; i++) {
+ ffts_hardcodedleaf_is_rec_even(
+ &pis, N, leaf_N / 2, i, stride + 1, VL);
+ ffts_hardcodedleaf_is_rec_even(
+ &pis, N, leaf_N / 2, i - (1 << stride), stride + 1, VL);
+ }
+
+ for (i = 0 - i2; i < 0; i++) {
+ ffts_hardcodedleaf_is_rec_even(
+ &pis, N, leaf_N, i, stride, VL);
+ }
#endif
+
+ return is;
+}
+
+static ptrdiff_t*
+ffts_init_offsets(size_t N, size_t leaf_N)
+{
+ ptrdiff_t *offsets, *tmp;
+ size_t i;
+
+ offsets = malloc(N/leaf_N * sizeof(*offsets));
+ if (!offsets) {
+ return NULL;
+ }
+
+ tmp = malloc(2 * N/leaf_N * sizeof(*tmp));
+ if (!tmp) {
+ free(offsets);
+ return NULL;
+ }
+
+#if LEAF_N == 8
+ ffts_elaborate_offsets_even8(tmp, ffts_ctzl(N));
+#else
+ ffts_elaborate_offsets_even(tmp, leaf_N, N, 0, 0, 1);
+#endif
+
+ for (i = 0; i < 2*N/leaf_N; i += 2) {
+ if (tmp[i] < 0) {
+ tmp[i] += N;
+ }
+ }
+
+ qsort(tmp, N/leaf_N, 2 * sizeof(*tmp), ffts_compare_offsets);
+
+ for (i = 0; i < N/leaf_N; i++) {
+ offsets[i] = 2 * tmp[2*i + 1];
+ }
+
+ free(tmp);
+ return offsets;
+}
+
+#endif /* FFTS_PATTERNS_H */
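Relative to the deleted patterns.c, the rewritten header above makes two behavioural changes: ffts_init_is() and ffts_init_offsets() now return freshly allocated arrays (NULL on allocation failure) instead of writing into an ffts_plan_t, and the old floating-point stride computation log(N/leafN)/log(2) becomes an integer ffts_ctzl(), declared elsewhere in the tree. A hedged usage sketch follows; the wrapper name and its error convention are assumptions, and since every helper in the header is static it must be included from the one .c file that uses it.

    /* Hedged usage sketch for the reworked helpers in patterns.h. */
    #include <stddef.h>
    #include <stdlib.h>

    static int plan_init_tables(ptrdiff_t **is, ptrdiff_t **offsets,
                                size_t N, size_t leaf_N, int VL)
    {
        *is = ffts_init_is(N, leaf_N, VL);
        if (!*is)
            return -1;

        *offsets = ffts_init_offsets(N, leaf_N);
        if (!*offsets) {
            free(*is);       /* unwind on partial failure */
            *is = NULL;
            return -1;
        }
        return 0;
    }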
diff --git a/lib/ffts/src/sequitur.h b/lib/ffts/src/sequitur.h
new file mode 100644
index 0000000..d459c56
--- /dev/null
+++ b/lib/ffts/src/sequitur.h
@@ -0,0 +1,448 @@
+/*
+
+ This file is part of FFTS -- The Fastest Fourier Transform in the South
+
+ Copyright (c) 2012, Anthony M. Blake <amb@anthonix.com>
+ Copyright (c) 2012, The University of Waikato
+
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+ * Neither the name of the organization nor the
+ names of its contributors may be used to endorse or promote products
+ derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL ANTHONY M. BLAKE BE LIABLE FOR ANY
+ DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*/
+
+typedef struct _sym_t {
+ int c;
+ struct _sym_t *pPrev, *pNext;
+ struct _seq_rule_t *r;
+ int offset;
+} sym_t;
+
+typedef struct _seq_rule_t {
+ int c;
+ sym_t *ss;
+ struct _seq_rule_t *pPrev, *pNext;
+ int count;
+ int length;
+} seq_rule_t;
+
+void sym_tail_insert(sym_t **ss, sym_t *s)
+{
+ if (!*ss) {
+ *ss = s;
+ s->pPrev = s->pNext = NULL;
+ } else {
+ while (*ss) {
+ s->pPrev = *ss;
+ ss = &(*ss)->pNext;
+ }
+
+ *ss = s;
+ }
+}
+
+sym_t* sym_init(int c)
+{
+ sym_t *s;
+
+ s = (sym_t*) malloc(sizeof(*s));
+ if (!s) {
+ return NULL;
+ }
+
+ s->c = c;
+ s->pPrev = s->pNext = NULL;
+ s->r = NULL;
+
+ return s;
+}
+
+sym_t* sym_init_from_sym(sym_t *s2)
+{
+ sym_t *s;
+
+ s = (sym_t*) malloc(sizeof(*s));
+ if (!s) {
+ return NULL;
+ }
+
+ s->c = s2->c;
+ s->pPrev = s->pNext = NULL;
+ s->r = s2->r;
+ s->offset = s2->offset;
+
+ return s;
+}
+
+seq_rule_t* seq_init_rule(int c)
+{
+ seq_rule_t *G;
+
+ G = (seq_rule_t *)malloc(sizeof(*G));
+ if (!G) {
+ return NULL;
+ }
+
+ G->c = c;
+ G->count = 2;
+ G->ss = NULL;
+ G->pPrev = NULL;
+ G->pNext = NULL;
+
+ return G;
+}
+
+seq_rule_t* seq_grammer_insert_new_rule(seq_rule_t *G, int r, sym_t *a, sym_t *b)
+{
+ sym_t *sa, *sb;
+
+ while (G->pNext) {
+ G = G->pNext;
+ }
+
+ G->pNext = seq_init_rule(r);
+ if (!G->pNext) {
+ return NULL;
+ }
+
+ sa = sym_init_from_sym(a);
+ if (!sa) {
+ goto cleanup_pnext;
+ }
+
+ sb = sym_init_from_sym(b);
+ if (!sb) {
+ goto cleanup_sa;
+ }
+
+ sb->offset = sb->offset - sa->offset;
+ sa->offset = 0;
+ sym_tail_insert(&G->pNext->ss, sa);
+ sym_tail_insert(&G->pNext->ss, sb);
+ return G->pNext;
+
+cleanup_sa:
+ free(sa);
+
+cleanup_pnext:
+ free(G->pNext);
+ G->pNext = NULL;
+
+ return NULL;
+}
+
+sym_t* sym_match_digram(sym_t *s, sym_t *term, sym_t *a, sym_t *b)
+{
+ while (s != term) {
+ if (s->c == a->c && s->pNext->c == b->c &&
+ s->pNext->offset - s->offset == b->offset-a->offset) {
+ return s;
+ }
+
+ s = s->pNext;
+ }
+
+ return NULL;
+}
+
+seq_rule_t* seq_match_digram(seq_rule_t *R, sym_t *a, sym_t *b)
+{
+ while (R) {
+ if (R->ss->c == a->c && R->ss->pNext->c == b->c &&
+ R->ss->pNext->offset - R->ss->offset == b->offset - a->offset) {
+ return R;
+ }
+
+ R = R->pNext;
+ }
+
+ return NULL;
+}
+
+sym_t* sym_tail(sym_t *s)
+{
+ while (s->pNext) {
+ s = s->pNext;
+ }
+
+ return s;
+}
+
+int sym_count(sym_t *s)
+{
+ int count = 0;
+
+ while (s) {
+ count++;
+ s = s->pNext;
+ }
+
+ return count;
+}
+
+sym_t* sym_copylist(sym_t *s)
+{
+ sym_t *head = NULL;
+ sym_t *prev = head;
+
+ while (s) {
+ sym_t *copy = sym_init_from_sym(s);
+ if (!copy) {
+ return NULL;
+ }
+
+ copy->pPrev = prev;
+
+ if (prev) {
+ prev->pNext = copy;
+ }
+
+ if (!head) {
+ head = copy;
+ }
+
+ prev = copy;
+ s = s->pNext;
+ }
+
+ return head;
+}
+
+void seq_enforce_uniqueness(seq_rule_t *G)
+{
+ seq_rule_t *R = G; /* includes the top rule, not just G->pNext */
+ seq_rule_t **ppr = &G->pNext;
+
+ while (R) {
+ if (R == G || R->count > 1) {
+ sym_t *s = R->ss;
+ sym_t **pp = &R->ss;
+
+ while (s) {
+ if (s->r && s->r->count == 1) {
+ sym_t *temp_itr;
+
+ *pp = s->r->ss;
+
+ temp_itr = s->r->ss;
+ while (temp_itr) {
+ temp_itr->offset += s->offset;
+ temp_itr = temp_itr->pNext;
+ }
+
+ s->r->ss->pPrev = s->pPrev;
+ if (s->pNext) {
+ s->pNext->pPrev = sym_tail(s->r->ss);
+ }
+
+ sym_tail(s->r->ss)->pNext = s->pNext;
+ s = s->r->ss;
+ continue;
+ }
+
+ pp = &s->pNext;
+ s = s->pNext;
+ }
+
+ ppr = &R->pNext;
+ } else {
+ *ppr = R->pNext;
+ }
+
+ R = R->pNext;
+ }
+}
+
+void seq_merge_small_rules(seq_rule_t *G, int thresh)
+{
+ seq_rule_t *R = G;
+
+ while (R) {
+ if (sym_count(R->ss) <= thresh) {
+ //printf("count %d > %d for %d\n", sym_count(R->ss), thresh, R->c);
+ sym_t *s = R->ss;
+ sym_t **pp = &R->ss;
+
+ while (s) {
+ if (s->r) {
+ sym_t *copylist;
+ sym_t *copylist_itr;
+
+ s->r->count--;
+
+ copylist = sym_copylist(s->r->ss);
+ if (!copylist) {
+ return;
+ }
+
+ copylist_itr = copylist;
+ while (copylist_itr) {
+ copylist_itr->offset += s->offset;
+ copylist_itr = copylist_itr->pNext;
+ }
+
+ *pp = copylist;
+ copylist->pPrev = s->pPrev;
+ if (s->pNext) {
+ s->pNext->pPrev = sym_tail(copylist);
+ }
+
+ sym_tail(copylist)->pNext = s->pNext;
+ pp = &(sym_tail(copylist)->pNext);
+ s = sym_tail(copylist)->pNext;
+ continue;
+ }
+
+ pp = &s->pNext;
+ s = s->pNext;
+ }
+ }
+
+ R = R->pNext;
+ }
+
+ seq_enforce_uniqueness(G);
+}
+
+void seq_extract_hierarchy(seq_rule_t *G)
+{
+ int next_rule = -2;
+ sym_t *cursym = G->ss;
+
+ while (cursym) {
+ sym_t *m = NULL;
+ seq_rule_t *mr = NULL;
+
+ if (cursym->pPrev && cursym->pPrev->pPrev) {
+ mr = seq_match_digram(G->pNext, cursym->pPrev, cursym);
+ if (mr) {
+ if (cursym->pPrev->r) {
+ cursym->pPrev->r->count--;
+ }
+
+ if(cursym->r) {
+ cursym->r->count--;
+ }
+
+ mr->count++;
+
+ cursym->pPrev->r = mr;
+ cursym->pPrev->c = mr->c;
+ cursym->pPrev->pNext = cursym->pNext;
+ cursym->pNext->pPrev = cursym->pPrev;
+ cursym = cursym->pPrev;
+ }
+
+ m = sym_match_digram(G->ss, cursym->pPrev->pPrev, cursym->pPrev, cursym);
+ if (m) {
+ seq_rule_t *newr;
+
+ if (cursym->pPrev->r) {
+ cursym->pPrev->r->count--;
+ }
+
+ if (cursym->r) {
+ cursym->r->count--;
+ }
+
+ newr = seq_grammer_insert_new_rule(G, next_rule, m, m->pNext);
+ if (!newr) {
+ return;
+ }
+
+ m->r = newr;
+ m->c = next_rule;
+ m->pNext = m->pNext->pNext;
+ m->pNext->pPrev = m;
+
+ cursym->pPrev->r = newr;
+ cursym->pPrev->c = next_rule;
+ cursym->pPrev->pNext = cursym->pNext;
+ cursym->pNext->pPrev = cursym->pPrev;
+ cursym = cursym->pPrev;
+
+ next_rule--;
+ }
+ }
+
+ if (!m && !mr) {
+ cursym = cursym->pNext;
+ }
+ }
+
+ seq_enforce_uniqueness(G);
+ seq_merge_small_rules(G, 2);
+// seq_enforce_uniqueness(G);
+}
+
+void seq_compute_lengths(seq_rule_t *G)
+{
+ seq_rule_t *R = G->pNext;
+ sym_t *s;
+ int sum;
+
+ while (R) {
+ sum = 0;
+ s = R->ss;
+
+ while (s) {
+ if (s->c >= 0) {
+ if (s->offset + s->c > sum) {
+ sum = s->offset + s->c;
+ }
+ }
+
+ if (s->c < 0) {
+ if (s->offset + s->r->length > sum) {
+ sum = s->offset + s->r->length;
+ }
+ }
+
+ s = s->pNext;
+ }
+
+ R->length = sum;
+ R = R->pNext;
+ }
+
+ sum = 0;
+ s = G->ss;
+
+ while (s) {
+ if (s->c >= 0) {
+ if (s->offset + s->c > sum) {
+ sum = s->offset + s->c;
+ }
+ }
+
+ if (s->c < 0) {
+ if (s->offset + s->r->length > sum) {
+ sum = s->offset + s->r->length;
+ }
+ }
+
+ s = s->pNext;
+ }
+
+ G->length = sum;
+}
\ No newline at end of file
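The new sequitur.h implements a Sequitur-style grammar builder over the transform's symbol string: seq_extract_hierarchy() replaces repeated digrams with rules, seq_enforce_uniqueness() inlines rules referenced only once, seq_merge_small_rules() expands rules at or below a size threshold, and seq_compute_lengths() sizes each rule (symbols with c >= 0 are terminals whose value is their size; c < 0 marks a rule reference). A hedged driver sketch is below; the symbol values and offsets are invented for illustration, and the root tag -1 is an arbitrary choice (generated rules count down from -2).

    /* Hedged sketch: building a grammar from a short symbol string. */
    #include <stdlib.h>

    static seq_rule_t* build_demo_grammar(void)
    {
        static const int syms[]    = { 8, 4, 8, 4, 8, 4 };
        static const int offsets[] = { 0, 8, 12, 20, 24, 32 };
        seq_rule_t *G = seq_init_rule(-1);   /* top-level rule */
        if (!G)
            return NULL;

        for (size_t i = 0; i < sizeof(syms) / sizeof(syms[0]); i++) {
            sym_t *s = sym_init(syms[i]);
            if (!s)
                return G;                    /* keep partial grammar on OOM */
            s->offset = offsets[i];          /* sym_init() leaves offset unset */
            sym_tail_insert(&G->ss, s);
        }

        seq_extract_hierarchy(G);   /* folds repeated digrams into rules */
        seq_compute_lengths(G);     /* fills in G->length and rule lengths */
        return G;
    }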
diff --git a/lib/ffts/src/sse.s b/lib/ffts/src/sse.s
deleted file mode 100644
index 79dd6ec..0000000
--- a/lib/ffts/src/sse.s
+++ /dev/null
@@ -1,878 +0,0 @@
-/*
-
- This file is part of FFTS -- The Fastest Fourier Transform in the South
-
- Copyright (c) 2012, Anthony M. Blake <amb@anthonix.com>
- Copyright (c) 2012, The University of Waikato
-
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions are met:
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in the
- documentation and/or other materials provided with the distribution.
- * Neither the name of the organization nor the
- names of its contributors may be used to endorse or promote products
- derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
- ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- DISCLAIMED. IN NO EVENT SHALL ANTHONY M. BLAKE BE LIABLE FOR ANY
- DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-*/
-
-
- .globl _neon_x4
- .align 4
-_neon_x4:
-
- .globl _neon_x8
- .align 4
-_neon_x8:
-
- .globl _neon_x8_t
- .align 4
-_neon_x8_t:
-
-
-#ifdef __APPLE__
- .globl _leaf_ee_init
-_leaf_ee_init:
-#else
- .globl leaf_ee_init
-leaf_ee_init:
-#endif
- #lea L_sse_constants(%rip), %r9
- movq 0xe0(%rdi), %r9
- xorl %eax, %eax
-# eax is loop counter (init to 0)
-# rcx is loop max count
-# rsi is 'in' base pointer
-# rdx is 'out' base pointer
-# r8 is offsets pointer
-# r9 is constants pointer
-# scratch: rax r11 r12
-# .align 4, 0x90
-
-# _leaf_ee + 9 needs 16 byte alignment
-#ifdef __APPLE__
- .globl _leaf_ee
-_leaf_ee:
-#else
- .globl leaf_ee
-leaf_ee:
-#endif
- movaps 32(%r9), %xmm0 #83.5
- movaps (%r9), %xmm8 #83.5
-LEAF_EE_1:
-LEAF_EE_const_0:
- movaps 0xFECA(%rsi,%rax,4), %xmm7 #83.5
-LEAF_EE_const_2:
- movaps 0xFECA(%rsi,%rax,4), %xmm12 #83.5
- movaps %xmm7, %xmm6 #83.5
-LEAF_EE_const_3:
- movaps 0xFECA(%rsi,%rax,4), %xmm10 #83.5
- movaps %xmm12, %xmm11 #83.5
- subps %xmm10, %xmm12 #83.5
- addps %xmm10, %xmm11 #83.5
- xorps %xmm8, %xmm12 #83.5
-LEAF_EE_const_1:
- movaps 0xFECA(%rsi,%rax,4), %xmm9 #83.5
-LEAF_EE_const_4:
- movaps 0xFECA(%rsi,%rax,4), %xmm10 #83.5
- addps %xmm9, %xmm6 #83.5
- subps %xmm9, %xmm7 #83.5
-LEAF_EE_const_5:
- movaps 0xFECA(%rsi,%rax,4), %xmm13 #83.5
- movaps %xmm10, %xmm9 #83.5
-LEAF_EE_const_6:
- movaps 0xFECA(%rsi,%rax,4), %xmm3 #83.5
- movaps %xmm6, %xmm5 #83.5
-LEAF_EE_const_7:
- movaps 0xFECA(%rsi,%rax,4), %xmm14 #83.5
- movaps %xmm3, %xmm15 #83.5
- shufps $177, %xmm12, %xmm12 #83.5
- movaps %xmm7, %xmm4 #83.5
- movslq (%r8, %rax, 4), %r11 #83.44
- subps %xmm13, %xmm10 #83.5
- subps %xmm14, %xmm3 #83.5
- addps %xmm11, %xmm5 #83.5
- subps %xmm11, %xmm6 #83.5
- subps %xmm12, %xmm4 #83.5
- addps %xmm12, %xmm7 #83.5
- addps %xmm13, %xmm9 #83.5
- addps %xmm14, %xmm15 #83.5
- movaps 16(%r9), %xmm12 #83.5
- movaps %xmm9, %xmm1 #83.5
- movaps 16(%r9), %xmm11 #83.5
- movaps %xmm5, %xmm2 #83.5
- mulps %xmm10, %xmm12 #83.5
- subps %xmm15, %xmm9 #83.5
- addps %xmm15, %xmm1 #83.5
- mulps %xmm3, %xmm11 #83.5
- addps %xmm1, %xmm2 #83.5
- subps %xmm1, %xmm5 #83.5
- shufps $177, %xmm10, %xmm10 #83.5
- xorps %xmm8, %xmm9 #83.5
- shufps $177, %xmm3, %xmm3 #83.5
- movaps %xmm6, %xmm1 #83.5
- mulps %xmm0, %xmm10 #83.5
- movaps %xmm4, %xmm13 #83.5
- mulps %xmm0, %xmm3 #83.5
- subps %xmm10, %xmm12 #83.5
- addps %xmm3, %xmm11 #83.5
- movaps %xmm12, %xmm3 #83.5
- movaps %xmm7, %xmm14 #83.5
- shufps $177, %xmm9, %xmm9 #83.5
- subps %xmm11, %xmm12 #83.5
- addps %xmm11, %xmm3 #83.5
- subps %xmm9, %xmm1 #83.5
- addps %xmm9, %xmm6 #83.5
- addps %xmm3, %xmm4 #83.5
- subps %xmm3, %xmm13 #83.5
- xorps %xmm8, %xmm12 #83.5
- movaps %xmm2, %xmm3 #83.5
- shufps $177, %xmm12, %xmm12 #83.5
- movaps %xmm6, %xmm9 #83.5
- movslq 8(%r8, %rax, 4), %r12 #83.59
- movlhps %xmm4, %xmm3 #83.5
- addq $4, %rax
- shufps $238, %xmm4, %xmm2 #83.5
- movaps %xmm1, %xmm4 #83.5
- #movntdq %xmm3, (%rdx,%r11,4) #83.5
- subps %xmm12, %xmm7 #83.5
- addps %xmm12, %xmm14 #83.5
- movlhps %xmm7, %xmm4 #83.5
- shufps $238, %xmm7, %xmm1 #83.5
- movaps %xmm5, %xmm7 #83.5
- movlhps %xmm13, %xmm7 #83.5
- movlhps %xmm14, %xmm9 #83.5
- shufps $238, %xmm13, %xmm5 #83.5
- shufps $238, %xmm14, %xmm6 #83.5
- movaps %xmm3, (%rdx,%r11,4) #83.5
- movaps %xmm4, 16(%rdx,%r11,4) #83.5
- movaps %xmm7, 32(%rdx,%r11,4) #83.5
- movaps %xmm9, 48(%rdx,%r11,4) #83.5
- movaps %xmm2, (%rdx,%r12,4) #83.5
- movaps %xmm1, 16(%rdx,%r12,4) #83.5
- movaps %xmm5, 32(%rdx,%r12,4) #83.5
- movaps %xmm6, 48(%rdx,%r12,4) #83.5
- cmpq %rcx, %rax
- jne LEAF_EE_1
-
-
-
-# _leaf_oo + 4 needs to be 16 byte aligned
-#ifdef __APPLE__
- .globl _leaf_oo
-_leaf_oo:
-#else
- .globl leaf_oo
-leaf_oo:
-#endif
- movaps (%r9), %xmm5 #92.7
-LEAF_OO_1:
-LEAF_OO_const_0:
- movaps 0xFECA(%rsi,%rax,4), %xmm4 #93.5
- movaps %xmm4, %xmm6 #93.5
-LEAF_OO_const_1:
- movaps 0xFECA(%rsi,%rax,4), %xmm7 #93.5
-LEAF_OO_const_2:
- movaps 0xFECA(%rsi,%rax,4), %xmm10 #93.5
- addps %xmm7, %xmm6 #93.5
- subps %xmm7, %xmm4 #93.5
-LEAF_OO_const_3:
- movaps 0xFECA(%rsi,%rax,4), %xmm8 #93.5
- movaps %xmm10, %xmm9 #93.5
-LEAF_OO_const_4:
- movaps 0xFECA(%rsi,%rax,4), %xmm1 #93.5
- movaps %xmm6, %xmm3 #93.5
-LEAF_OO_const_5:
- movaps 0xFECA(%rsi,%rax,4), %xmm11 #93.5
- movaps %xmm1, %xmm2 #93.5
-LEAF_OO_const_6:
- movaps 0xFECA(%rsi,%rax,4), %xmm14 #93.5
- movaps %xmm4, %xmm15 #93.5
-LEAF_OO_const_7:
- movaps 0xFECA(%rsi,%rax,4), %xmm12 #93.5
- movaps %xmm14, %xmm13 #93.5
- movslq (%r8, %rax, 4), %r11 #83.44
- subps %xmm8, %xmm10 #93.5
- addps %xmm8, %xmm9 #93.5
- addps %xmm11, %xmm2 #93.5
- subps %xmm12, %xmm14 #93.5
- subps %xmm11, %xmm1 #93.5
- addps %xmm12, %xmm13 #93.5
- addps %xmm9, %xmm3 #93.5
- subps %xmm9, %xmm6 #93.5
- xorps %xmm5, %xmm10 #93.5
- xorps %xmm5, %xmm14 #93.5
- shufps $177, %xmm10, %xmm10 #93.5
- movaps %xmm2, %xmm9 #93.5
- shufps $177, %xmm14, %xmm14 #93.5
- movaps %xmm6, %xmm7 #93.5
- movslq 8(%r8, %rax, 4), %r12 #83.59
- addq $4, %rax #92.18
- addps %xmm10, %xmm4 #93.5
- addps %xmm13, %xmm9 #93.5
- subps %xmm13, %xmm2 #93.5
- subps %xmm10, %xmm15 #93.5
- movaps %xmm1, %xmm13 #93.5
- movaps %xmm2, %xmm8 #93.5
- movlhps %xmm4, %xmm7 #93.5
- subps %xmm14, %xmm13 #93.5
- addps %xmm14, %xmm1 #93.5
- shufps $238, %xmm4, %xmm6 #93.5
- movaps %xmm3, %xmm14 #93.5
- movaps %xmm9, %xmm4 #93.5
- movlhps %xmm15, %xmm14 #93.5
- movlhps %xmm13, %xmm4 #93.5
- movlhps %xmm1, %xmm8 #93.5
- shufps $238, %xmm15, %xmm3 #93.5
- shufps $238, %xmm13, %xmm9 #93.5
- shufps $238, %xmm1, %xmm2 #93.5
- movaps %xmm14, (%rdx,%r11,4) #93.5
- movaps %xmm7, 16(%rdx,%r11,4) #93.5
- movaps %xmm4, 32(%rdx,%r11,4) #93.5
- movaps %xmm8, 48(%rdx,%r11,4) #93.5
- movaps %xmm3, (%rdx,%r12,4) #93.5
- movaps %xmm6, 16(%rdx,%r12,4) #93.5
- movaps %xmm9, 32(%rdx,%r12,4) #93.5
- movaps %xmm2, 48(%rdx,%r12,4) #93.5
- cmpq %rcx, %rax
- jne LEAF_OO_1 # Prob 95% #92.14
-
-#ifdef __APPLE__
- .globl _leaf_eo
-_leaf_eo:
-#else
- .globl leaf_eo
-leaf_eo:
-#endif
-LEAF_EO_const_0:
- movaps 0xFECA(%rsi,%rax,4), %xmm9 #88.5
-LEAF_EO_const_2:
- movaps 0xFECA(%rsi,%rax,4), %xmm7 #88.5
- movaps %xmm9, %xmm11 #88.5
-LEAF_EO_const_3:
- movaps 0xFECA(%rsi,%rax,4), %xmm5 #88.5
- movaps %xmm7, %xmm6 #88.5
-LEAF_EO_const_1:
- movaps 0xFECA(%rsi,%rax,4), %xmm4 #88.5
- subps %xmm5, %xmm7 #88.5
- addps %xmm4, %xmm11 #88.5
- subps %xmm4, %xmm9 #88.5
- addps %xmm5, %xmm6 #88.5
- movaps (%r9), %xmm3 #88.5
- movaps %xmm11, %xmm10 #88.5
- xorps %xmm3, %xmm7 #88.5
- movaps %xmm9, %xmm8 #88.5
- shufps $177, %xmm7, %xmm7 #88.5
- addps %xmm6, %xmm10 #88.5
- subps %xmm6, %xmm11 #88.5
- subps %xmm7, %xmm8 #88.5
- addps %xmm7, %xmm9 #88.5
- movslq 8(%r8, %rax, 4), %r12 #83.59
- movaps %xmm10, %xmm2 #88.5
- movslq (%r8, %rax, 4), %r11 #83.44
- movaps %xmm11, %xmm1 #88.5
- shufps $238, %xmm8, %xmm10 #88.5
- shufps $238, %xmm9, %xmm11 #88.5
- movaps %xmm10, (%rdx,%r12,4) #88.5
- movaps %xmm11, 16(%rdx,%r12,4) #88.5
-LEAF_EO_const_4:
- movaps 0xFECA(%rsi,%rax,4), %xmm15 #88.5
-LEAF_EO_const_5:
- movaps 0xFECA(%rsi,%rax,4), %xmm12 #88.5
- movaps %xmm15, %xmm14 #88.5
-LEAF_EO_const_6:
- movaps 0xFECA(%rsi,%rax,4), %xmm4 #88.5
- addps %xmm12, %xmm14 #88.5
- subps %xmm12, %xmm15 #88.5
-LEAF_EO_const_7:
- movaps 0xFECA(%rsi,%rax,4), %xmm13 #88.5
- movaps %xmm4, %xmm5 #88.5
- movaps %xmm14, %xmm7 #88.5
- addps %xmm13, %xmm5 #88.5
- subps %xmm13, %xmm4 #88.5
- movlhps %xmm8, %xmm2 #88.5
- movaps %xmm5, %xmm8 #88.5
- movlhps %xmm15, %xmm7 #88.5
- xorps %xmm3, %xmm15 #88.5
- movaps %xmm5, %xmm6 #88.5
- subps %xmm14, %xmm5 #88.5
- addps %xmm14, %xmm6 #88.5
- movlhps %xmm9, %xmm1 #88.5
- movaps %xmm4, %xmm14 #88.5
- movlhps %xmm4, %xmm8 #88.5
- movaps %xmm1, %xmm12 #88.5
- shufps $177, %xmm15, %xmm15 #88.5
- movaps 0x30(%r9), %xmm11 #88.5
- addq $4, %rax #90.5
- subps %xmm15, %xmm14 #88.5
- mulps %xmm7, %xmm11 #88.5
- addps %xmm15, %xmm4 #88.5
- movaps 0x30(%r9), %xmm9 #88.5
- movaps 0x40(%r9), %xmm15 #88.5
- shufps $177, %xmm7, %xmm7 #88.5
- mulps %xmm8, %xmm9 #88.5
- mulps %xmm15, %xmm7 #88.5
- shufps $177, %xmm8, %xmm8 #88.5
- subps %xmm7, %xmm11 #88.5
- mulps %xmm15, %xmm8 #88.5
- movaps %xmm11, %xmm10 #88.5
- addps %xmm8, %xmm9 #88.5
- shufps $238, %xmm14, %xmm6 #88.5
- subps %xmm9, %xmm11 #88.5
- addps %xmm9, %xmm10 #88.5
- xorps %xmm3, %xmm11 #88.5
- movaps %xmm2, %xmm3 #88.5
- shufps $177, %xmm11, %xmm11 #88.5
- subps %xmm10, %xmm3 #88.5
- addps %xmm10, %xmm2 #88.5
- addps %xmm11, %xmm12 #88.5
- subps %xmm11, %xmm1 #88.5
- shufps $238, %xmm4, %xmm5 #88.5
- movaps %xmm5, 48(%rdx,%r12,4) #88.5
- movaps %xmm6, 32(%rdx,%r12,4) #88.5
- movaps %xmm2, (%rdx,%r11,4) #88.5
- movaps %xmm1, 16(%rdx,%r11,4) #88.5
- movaps %xmm3, 32(%rdx,%r11,4) #88.5
- movaps %xmm12, 48(%rdx,%r11,4) #88.5
-
-
-#ifdef __APPLE__
- .globl _leaf_oe
-_leaf_oe:
-#else
- .globl leaf_oe
-leaf_oe:
-#endif
- movaps (%r9), %xmm0 #59.5
- #movaps 0x20(%r9), %xmm1 #59.5
-LEAF_OE_const_2:
- movaps 0xFECA(%rsi,%rax,4), %xmm6 #70.5
-LEAF_OE_const_3:
- movaps 0xFECA(%rsi,%rax,4), %xmm8 #70.5
- movaps %xmm6, %xmm10 #70.5
- shufps $228, %xmm8, %xmm10 #70.5
- movaps %xmm10, %xmm9 #70.5
- shufps $228, %xmm6, %xmm8 #70.5
-LEAF_OE_const_0:
- movaps 0xFECA(%rsi,%rax,4), %xmm12 #70.5
-LEAF_OE_const_1:
- movaps 0xFECA(%rsi,%rax,4), %xmm7 #70.5
- movaps %xmm12, %xmm14 #70.5
- movslq (%r8, %rax, 4), %r11 #83.44
- addps %xmm8, %xmm9 #70.5
- subps %xmm8, %xmm10 #70.5
- addps %xmm7, %xmm14 #70.5
- subps %xmm7, %xmm12 #70.5
- movaps %xmm9, %xmm4 #70.5
- movaps %xmm14, %xmm13 #70.5
- shufps $238, %xmm10, %xmm4 #70.5
- xorps %xmm0, %xmm10 #70.5
- shufps $177, %xmm10, %xmm10 #70.5
- movaps %xmm12, %xmm11 #70.5
- movaps %xmm14, %xmm5 #70.5
- addps %xmm9, %xmm13 #70.5
- subps %xmm10, %xmm11 #70.5
- subps %xmm9, %xmm14 #70.5
- shufps $238, %xmm12, %xmm5 #70.5
- addps %xmm10, %xmm12 #70.5
- movslq 8(%r8, %rax, 4), %r12 #83.59
- movlhps %xmm11, %xmm13 #70.5
- movaps %xmm13, (%rdx,%r11,4) #70.5
- movaps 0x30(%r9), %xmm13 #70.5
- movlhps %xmm12, %xmm14 #70.5
- movaps 0x40(%r9), %xmm12 #70.5
- mulps %xmm5, %xmm13 #70.5
- shufps $177, %xmm5, %xmm5 #70.5
- mulps %xmm12, %xmm5 #70.5
- movaps %xmm14, 16(%rdx,%r11,4) #70.5
- subps %xmm5, %xmm13 #70.5
- movaps 0x30(%r9), %xmm5 #70.5
- mulps %xmm4, %xmm5 #70.5
- shufps $177, %xmm4, %xmm4 #70.5
- mulps %xmm12, %xmm4 #70.5
-LEAF_OE_const_4:
- movaps 0xFECA(%rsi,%rax,4), %xmm9 #70.5
- addps %xmm4, %xmm5 #70.5
-LEAF_OE_const_6:
- movaps 0xFECA(%rsi,%rax,4), %xmm7 #70.5
- movaps %xmm9, %xmm3 #70.5
-LEAF_OE_const_7:
- movaps 0xFECA(%rsi,%rax,4), %xmm2 #70.5
- movaps %xmm7, %xmm6 #70.5
-LEAF_OE_const_5:
- movaps 0xFECA(%rsi,%rax,4), %xmm15 #70.5
- movaps %xmm13, %xmm4 #70.5
- subps %xmm2, %xmm7 #70.5
- addps %xmm15, %xmm3 #70.5
- subps %xmm15, %xmm9 #70.5
- addps %xmm2, %xmm6 #70.5
- subps %xmm5, %xmm13 #70.5
- addps %xmm5, %xmm4 #70.5
- xorps %xmm0, %xmm7 #70.5
- addq $4, %rax #72.5
- movaps %xmm3, %xmm2 #70.5
- shufps $177, %xmm7, %xmm7 #70.5
- movaps %xmm9, %xmm8 #70.5
- xorps %xmm0, %xmm13 #70.5
- addps %xmm6, %xmm2 #70.5
- subps %xmm7, %xmm8 #70.5
- subps %xmm6, %xmm3 #70.5
- addps %xmm7, %xmm9 #70.5
- movaps %xmm2, %xmm10 #70.5
- movaps %xmm3, %xmm11 #70.5
- shufps $238, %xmm8, %xmm2 #70.5
- shufps $238, %xmm9, %xmm3 #70.5
- movaps %xmm2, %xmm14 #70.5
- shufps $177, %xmm13, %xmm13 #70.5
- subps %xmm4, %xmm14 #70.5
- addps %xmm4, %xmm2 #70.5
- movaps %xmm3, %xmm4 #70.5
- subps %xmm13, %xmm3 #70.5
- addps %xmm13, %xmm4 #70.5
- movlhps %xmm8, %xmm10 #70.5
- movlhps %xmm9, %xmm11 #70.5
- movaps %xmm10, 32(%rdx,%r11,4) #70.5
- movaps %xmm11, 48(%rdx,%r11,4) #70.5
- movaps %xmm2, (%rdx,%r12,4) #70.5
- movaps %xmm3, 16(%rdx,%r12,4) #70.5
- movaps %xmm14, 32(%rdx,%r12,4) #70.5
- movaps %xmm4, 48(%rdx,%r12,4) #70.5
-
-
-#ifdef __APPLE__
- .globl _leaf_end
-_leaf_end:
-#else
- .globl leaf_end
-leaf_end:
-#endif
-
-#ifdef __APPLE__
- .globl _x_init
-_x_init:
-#else
- .globl x_init
-x_init:
-#endif
- #movaps L_sse_constants(%rip), %xmm3 #34.3
- movaps (%r9), %xmm3 #34.3
- movq 0x20(%rdi),%r8
-#ifdef __APPLE__
- .globl _x4
-_x4:
-#else
- .globl x4
-x4:
-#endif
- movaps 64(%rdx), %xmm0 #34.3
- movaps 96(%rdx), %xmm1 #34.3
- movaps (%rdx), %xmm7 #34.3
- movaps (%r8), %xmm4 #const
- movaps %xmm7, %xmm9 #34.3
- movaps %xmm4, %xmm6 #34.3
- movaps 16(%r8), %xmm2 #const
- mulps %xmm0, %xmm6 #34.3
- mulps %xmm1, %xmm4 #34.3
- shufps $177, %xmm0, %xmm0 #34.3
- shufps $177, %xmm1, %xmm1 #34.3
- mulps %xmm2, %xmm0 #34.3
- mulps %xmm1, %xmm2 #34.3
- subps %xmm0, %xmm6 #34.3
- addps %xmm2, %xmm4 #34.3
- movaps %xmm6, %xmm5 #34.3
- subps %xmm4, %xmm6 #34.3
- addps %xmm4, %xmm5 #34.3
- movaps 32(%rdx), %xmm8 #34.3
- xorps %xmm3, %xmm6 #34.3
- shufps $177, %xmm6, %xmm6 #34.3
- movaps %xmm8, %xmm10 #34.3
- movaps 112(%rdx), %xmm12 #34.3
- subps %xmm5, %xmm9 #34.3
- addps %xmm5, %xmm7 #34.3
- addps %xmm6, %xmm10 #34.3
- subps %xmm6, %xmm8 #34.3
- movaps %xmm7, (%rdx) #34.3
- movaps %xmm8, 32(%rdx) #34.3
- movaps %xmm9, 64(%rdx) #34.3
- movaps %xmm10, 96(%rdx) #34.3
- movaps 32(%r8), %xmm14 #const #34.3
- movaps 80(%rdx), %xmm11 #34.3
- movaps %xmm14, %xmm0 #34.3
- movaps 48(%r8), %xmm13 #const #34.3
- mulps %xmm11, %xmm0 #34.3
- mulps %xmm12, %xmm14 #34.3
- shufps $177, %xmm11, %xmm11 #34.3
- shufps $177, %xmm12, %xmm12 #34.3
- mulps %xmm13, %xmm11 #34.3
- mulps %xmm12, %xmm13 #34.3
- subps %xmm11, %xmm0 #34.3
- addps %xmm13, %xmm14 #34.3
- movaps %xmm0, %xmm15 #34.3
- subps %xmm14, %xmm0 #34.3
- addps %xmm14, %xmm15 #34.3
- xorps %xmm3, %xmm0 #34.3
- movaps 16(%rdx), %xmm1 #34.3
- movaps 48(%rdx), %xmm2 #34.3
- movaps %xmm1, %xmm4 #34.3
- shufps $177, %xmm0, %xmm0 #34.3
- movaps %xmm2, %xmm5 #34.3
- addps %xmm15, %xmm1 #34.3
- subps %xmm0, %xmm2 #34.3
- subps %xmm15, %xmm4 #34.3
- addps %xmm0, %xmm5 #34.3
- movaps %xmm1, 16(%rdx) #34.3
- movaps %xmm2, 48(%rdx) #34.3
- movaps %xmm4, 80(%rdx) #34.3
- movaps %xmm5, 112(%rdx) #34.3
- ret
-
-# _x8_soft + 5 needs to be 16 byte aligned
-#ifdef __APPLE__
- .globl _x8_soft
-_x8_soft:
-#else
- .globl x8_soft
-x8_soft:
-#endif
- xorl %eax, %eax
- movq %rdx, %rbx
- movq %r8, %rsi
- leaq (%rdx,%rcx,4), %r9
- leaq (%r9,%rcx,4), %r10
- leaq (%r10,%rcx,4), %r11
- leaq (%r11,%rcx,4), %r12
- leaq (%r12,%rcx,4), %r13
- leaq (%r13,%rcx,4), %r14
- leaq (%r14,%rcx,4), %r15
-X8_soft_loop:
- movaps (%rsi), %xmm9
- movaps (%r10,%rax,4), %xmm6
- movaps %xmm9, %xmm11
- movaps (%r11,%rax,4), %xmm7
- movaps 16(%rsi), %xmm8
- mulps %xmm6, %xmm11
- mulps %xmm7, %xmm9
- shufps $177, %xmm6, %xmm6
- mulps %xmm8, %xmm6
- shufps $177, %xmm7, %xmm7
- subps %xmm6, %xmm11
- mulps %xmm7, %xmm8
- movaps %xmm11, %xmm10
- addps %xmm8, %xmm9
- movaps 32(%rsi), %xmm15
- addps %xmm9, %xmm10
- subps %xmm9, %xmm11
- movaps (%rbx,%rax,4), %xmm5
- movaps %xmm15, %xmm6
- movaps (%r12,%rax,4), %xmm12
- movaps %xmm5, %xmm2
- movaps (%r14,%rax,4), %xmm13
- xorps %xmm3, %xmm11 #const
- movaps 48(%rsi), %xmm14
- subps %xmm10, %xmm2
- mulps %xmm12, %xmm6
- addps %xmm10, %xmm5
- mulps %xmm13, %xmm15
- movaps 64(%rsi), %xmm10
- movaps %xmm5, %xmm0
- shufps $177, %xmm12, %xmm12
- shufps $177, %xmm13, %xmm13
- mulps %xmm14, %xmm12
- mulps %xmm13, %xmm14
- subps %xmm12, %xmm6
- addps %xmm14, %xmm15
- movaps (%r13,%rax,4), %xmm7
- movaps %xmm10, %xmm13
- movaps (%r15,%rax,4), %xmm8
- movaps %xmm6, %xmm12
- movaps 80(%rsi), %xmm9
- addq $96, %rsi
- mulps %xmm7, %xmm13
- subps %xmm15, %xmm6
- addps %xmm15, %xmm12
- mulps %xmm8, %xmm10
- subps %xmm12, %xmm0
- addps %xmm12, %xmm5
- shufps $177, %xmm7, %xmm7
- xorps %xmm3, %xmm6 #const
- shufps $177, %xmm8, %xmm8
- movaps %xmm2, %xmm12
- mulps %xmm9, %xmm7
- mulps %xmm8, %xmm9
- subps %xmm7, %xmm13
- addps %xmm9, %xmm10
- movaps (%r9,%rax,4), %xmm4
- shufps $177, %xmm11, %xmm11
- movaps %xmm4, %xmm1
- shufps $177, %xmm6, %xmm6
- addps %xmm11, %xmm1
- subps %xmm11, %xmm4
- addps %xmm6, %xmm12
- subps %xmm6, %xmm2
- movaps %xmm13, %xmm11
- movaps %xmm4, %xmm14
- movaps %xmm1, %xmm6
- subps %xmm10, %xmm13
- addps %xmm10, %xmm11
- xorps %xmm3, %xmm13 #const
- addps %xmm11, %xmm4
- subps %xmm11, %xmm14
- shufps $177, %xmm13, %xmm13
- movaps %xmm5, (%rbx,%rax,4)
- movaps %xmm4, (%r9,%rax,4)
- movaps %xmm2, (%r10,%rax,4)
- subps %xmm13, %xmm1
- addps %xmm13, %xmm6
- movaps %xmm1, (%r11,%rax,4)
- movaps %xmm0, (%r12,%rax,4)
- movaps %xmm14, (%r13,%rax,4)
- movaps %xmm12, (%r14,%rax,4)
- movaps %xmm6, (%r15,%rax,4)
- addq $4, %rax
- cmpq %rcx, %rax
- jne X8_soft_loop
- ret
-
-#ifdef __APPLE__
- .globl _x8_hard
-_x8_hard:
-#else
- .globl x8_hard
-x8_hard:
-#endif
- movaps (%r9), %xmm5
-X8_loop:
- movaps (%r8), %xmm9
-X8_const_2:
- movaps 0xFECA(%rdx,%rax,4), %xmm6
- movaps %xmm9, %xmm11
-X8_const_3:
- movaps 0xFECA(%rdx,%rax,4), %xmm7
- movaps 16(%r8), %xmm8
- mulps %xmm6, %xmm11
- mulps %xmm7, %xmm9
- shufps $177, %xmm6, %xmm6
- mulps %xmm8, %xmm6
- shufps $177, %xmm7, %xmm7
- subps %xmm6, %xmm11
- mulps %xmm7, %xmm8
- movaps %xmm11, %xmm10
- addps %xmm8, %xmm9
- movaps 32(%r8), %xmm15
- addps %xmm9, %xmm10
- subps %xmm9, %xmm11
-X8_const_0:
- movaps 0xFECA(%rdx,%rax,4), %xmm3
- movaps %xmm15, %xmm6
-X8_const_4:
- movaps 0xFECA(%rdx,%rax,4), %xmm12
- movaps %xmm3, %xmm2
-X8_const_6:
- movaps 0xFECA(%rdx,%rax,4), %xmm13
- xorps %xmm5, %xmm11
- movaps 48(%r8), %xmm14
- subps %xmm10, %xmm2
- mulps %xmm12, %xmm6
- addps %xmm10, %xmm3
- mulps %xmm13, %xmm15
- movaps 64(%r8), %xmm10
- movaps %xmm3, %xmm0
- shufps $177, %xmm12, %xmm12
- shufps $177, %xmm13, %xmm13
- mulps %xmm14, %xmm12
- mulps %xmm13, %xmm14
- subps %xmm12, %xmm6
- addps %xmm14, %xmm15
-X8_const_5:
- movaps 0xFECA(%rdx,%rax,4), %xmm7
- movaps %xmm10, %xmm13
-X8_const_7:
- movaps 0xFECA(%rdx,%rax,4), %xmm8
- movaps %xmm6, %xmm12
- movaps 80(%r8), %xmm9
- addq $96, %r8
- mulps %xmm7, %xmm13
- subps %xmm15, %xmm6
- addps %xmm15, %xmm12
- mulps %xmm8, %xmm10
- subps %xmm12, %xmm0
- addps %xmm12, %xmm3
- shufps $177, %xmm7, %xmm7
- xorps %xmm5, %xmm6
- shufps $177, %xmm8, %xmm8
- movaps %xmm2, %xmm12
- mulps %xmm9, %xmm7
- mulps %xmm8, %xmm9
- subps %xmm7, %xmm13
- addps %xmm9, %xmm10
-X8_const_1:
- movaps 0xFECA(%rdx,%rax,4), %xmm4
- shufps $177, %xmm11, %xmm11
- movaps %xmm4, %xmm1
- shufps $177, %xmm6, %xmm6
- addps %xmm11, %xmm1
- subps %xmm11, %xmm4
- addps %xmm6, %xmm12
- subps %xmm6, %xmm2
- movaps %xmm13, %xmm11
- movaps %xmm4, %xmm14
- movaps %xmm1, %xmm6
- subps %xmm10, %xmm13
- addps %xmm10, %xmm11
- xorps %xmm5, %xmm13
- addps %xmm11, %xmm4
- subps %xmm11, %xmm14
- shufps $177, %xmm13, %xmm13
-X8_const1_0:
- movaps %xmm3, 0xFECA(%rdx,%rax,4)
-X8_const1_1:
- movaps %xmm4, 0xFECA(%rdx,%rax,4)
-X8_const1_2:
- movaps %xmm2, 0xFECA(%rdx,%rax,4)
- subps %xmm13, %xmm1
- addps %xmm13, %xmm6
-X8_const1_3:
- movaps %xmm1, 0xFECA(%rdx,%rax,4)
-X8_const1_4:
- movaps %xmm0, 0xFECA(%rdx,%rax,4)
-X8_const1_5:
- movaps %xmm14, 0xFECA(%rdx,%rax,4)
-X8_const1_6:
- movaps %xmm12, 0xFECA(%rdx,%rax,4)
-X8_const1_7:
- movaps %xmm6, 0xFECA(%rdx,%rax,4)
- addq $4, %rax
- cmpq %rcx, %rax
- jne X8_loop
-
-#ifdef __APPLE__
- .globl _sse_leaf_ee_offsets
- .globl _sse_leaf_oo_offsets
- .globl _sse_leaf_eo_offsets
- .globl _sse_leaf_oe_offsets
- .align 4
-_sse_leaf_ee_offsets:
- .long LEAF_EE_const_0-_leaf_ee+0x4
- .long LEAF_EE_const_1-_leaf_ee+0x5
- .long LEAF_EE_const_2-_leaf_ee+0x5
- .long LEAF_EE_const_3-_leaf_ee+0x5
- .long LEAF_EE_const_4-_leaf_ee+0x5
- .long LEAF_EE_const_5-_leaf_ee+0x5
- .long LEAF_EE_const_6-_leaf_ee+0x4
- .long LEAF_EE_const_7-_leaf_ee+0x5
-_sse_leaf_oo_offsets:
- .long LEAF_OO_const_0-_leaf_oo+0x4
- .long LEAF_OO_const_1-_leaf_oo+0x4
- .long LEAF_OO_const_2-_leaf_oo+0x5
- .long LEAF_OO_const_3-_leaf_oo+0x5
- .long LEAF_OO_const_4-_leaf_oo+0x4
- .long LEAF_OO_const_5-_leaf_oo+0x5
- .long LEAF_OO_const_6-_leaf_oo+0x5
- .long LEAF_OO_const_7-_leaf_oo+0x5
-_sse_leaf_eo_offsets:
- .long LEAF_EO_const_0-_leaf_eo+0x5
- .long LEAF_EO_const_1-_leaf_eo+0x4
- .long LEAF_EO_const_2-_leaf_eo+0x4
- .long LEAF_EO_const_3-_leaf_eo+0x4
- .long LEAF_EO_const_4-_leaf_eo+0x5
- .long LEAF_EO_const_5-_leaf_eo+0x5
- .long LEAF_EO_const_6-_leaf_eo+0x4
- .long LEAF_EO_const_7-_leaf_eo+0x5
-_sse_leaf_oe_offsets:
- .long LEAF_OE_const_0-_leaf_oe+0x5
- .long LEAF_OE_const_1-_leaf_oe+0x4
- .long LEAF_OE_const_2-_leaf_oe+0x4
- .long LEAF_OE_const_3-_leaf_oe+0x5
- .long LEAF_OE_const_4-_leaf_oe+0x5
- .long LEAF_OE_const_5-_leaf_oe+0x5
- .long LEAF_OE_const_6-_leaf_oe+0x4
- .long LEAF_OE_const_7-_leaf_oe+0x4
-#else
- .globl sse_leaf_ee_offsets
- .globl sse_leaf_oo_offsets
- .globl sse_leaf_eo_offsets
- .globl sse_leaf_oe_offsets
- .align 4
-sse_leaf_ee_offsets:
- .long LEAF_EE_const_0-leaf_ee+0x4
- .long LEAF_EE_const_1-leaf_ee+0x5
- .long LEAF_EE_const_2-leaf_ee+0x5
- .long LEAF_EE_const_3-leaf_ee+0x5
- .long LEAF_EE_const_4-leaf_ee+0x5
- .long LEAF_EE_const_5-leaf_ee+0x5
- .long LEAF_EE_const_6-leaf_ee+0x4
- .long LEAF_EE_const_7-leaf_ee+0x5
-sse_leaf_oo_offsets:
- .long LEAF_OO_const_0-leaf_oo+0x4
- .long LEAF_OO_const_1-leaf_oo+0x4
- .long LEAF_OO_const_2-leaf_oo+0x5
- .long LEAF_OO_const_3-leaf_oo+0x5
- .long LEAF_OO_const_4-leaf_oo+0x4
- .long LEAF_OO_const_5-leaf_oo+0x5
- .long LEAF_OO_const_6-leaf_oo+0x5
- .long LEAF_OO_const_7-leaf_oo+0x5
-sse_leaf_eo_offsets:
- .long LEAF_EO_const_0-leaf_eo+0x5
- .long LEAF_EO_const_1-leaf_eo+0x4
- .long LEAF_EO_const_2-leaf_eo+0x4
- .long LEAF_EO_const_3-leaf_eo+0x4
- .long LEAF_EO_const_4-leaf_eo+0x5
- .long LEAF_EO_const_5-leaf_eo+0x5
- .long LEAF_EO_const_6-leaf_eo+0x4
- .long LEAF_EO_const_7-leaf_eo+0x5
-sse_leaf_oe_offsets:
- .long LEAF_OE_const_0-leaf_oe+0x5
- .long LEAF_OE_const_1-leaf_oe+0x4
- .long LEAF_OE_const_2-leaf_oe+0x4
- .long LEAF_OE_const_3-leaf_oe+0x5
- .long LEAF_OE_const_4-leaf_oe+0x5
- .long LEAF_OE_const_5-leaf_oe+0x5
- .long LEAF_OE_const_6-leaf_oe+0x4
- .long LEAF_OE_const_7-leaf_oe+0x4
-#endif
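-
-# The offset tables above appear to serve FFTS's dynamic code generator: each
-# entry records the byte distance from a leaf_* routine's entry point to a
-# 0xFECA disp32 placeholder in the instruction stream (the +0x4/+0x5 reflects
-# how many prefix/opcode/ModRM/SIB bytes precede the displacement field in
-# each encoding), so generated transforms can be built by copying a template
-# and patching real offsets in. A minimal sketch of one patch step, with a
-# hypothetical helper name:
-#
-#   #include <stdint.h>
-#   #include <string.h>
-#
-#   /* Hypothetical helper, not part of FFTS: after copying a leaf_* template
-#    * into an executable buffer, overwrite one 0xFECA disp32 placeholder.
-#    * 'entry' is one value from an sse_leaf_*_offsets table: the byte offset
-#    * of the displacement field from the routine's first instruction. */
-#   static void patch_disp32(uint8_t *code_copy, uint32_t entry, int32_t real_disp)
-#   {
-#       memcpy(code_copy + entry, &real_disp, sizeof(real_disp)); /* unaligned-safe */
-#   }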
-
-#ifdef __APPLE__
- .data
-#else
- .section .data
-#endif
- .p2align 4
-#ifdef __APPLE__
- .globl _sse_constants
-_sse_constants:
-#else
- .globl sse_constants
-sse_constants:
-#endif
- .long 0x00000000,0x80000000,0x00000000,0x80000000
- .long 0x3f3504f3,0x3f3504f3,0x3f3504f3,0x3f3504f3
- .long 0xbf3504f3,0x3f3504f3,0xbf3504f3,0x3f3504f3
- .long 0x3f800000,0x3f800000,0x3f3504f3,0x3f3504f3
- .long 0x00000000,0x00000000,0xbf3504f3,0x3f3504f3
-#ifdef __APPLE__
- .globl _sse_constants_inv
-_sse_constants_inv:
-#else
- .globl sse_constants_inv
-sse_constants_inv:
-#endif
- .long 0x80000000,0x00000000,0x80000000,0x00000000
- .long 0x3f3504f3,0x3f3504f3,0x3f3504f3,0x3f3504f3
- .long 0x3f3504f3,0xbf3504f3,0x3f3504f3,0xbf3504f3
- .long 0x3f800000,0x3f800000,0x3f3504f3,0x3f3504f3
- .long 0x00000000,0x00000000,0x3f3504f3,0xbf3504f3
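-
-# For reference, the bit patterns in these tables decode to the few floats an
-# SSE split-radix butterfly needs: 0x3f800000 is 1.0f, 0x3f3504f3/0xbf3504f3
-# are +/-1/sqrt(2) (~0.70710677f), and 0x80000000 is -0.0f, the per-lane sign
-# mask the code applies with xorps; sse_constants_inv swaps which lanes carry
-# the sign bit for the inverse transform. A standalone decode (not FFTS code):
-#
-#   #include <stdint.h>
-#   #include <stdio.h>
-#   #include <string.h>
-#
-#   int main(void)
-#   {
-#       const uint32_t bits[] = { 0x3f3504f3, 0xbf3504f3, 0x3f800000, 0x80000000 };
-#       size_t i;
-#
-#       for (i = 0; i < sizeof(bits) / sizeof(bits[0]); i++) {
-#           float f;
-#           memcpy(&f, &bits[i], sizeof(f)); /* bit-exact reinterpretation */
-#           printf("0x%08x -> %.8f\n", (unsigned) bits[i], f);
-#       }
-#       return 0;
-#   }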
diff --git a/lib/ffts/src/types.h b/lib/ffts/src/types.h
index 04cbf61..f8997ce 100644
--- a/lib/ffts/src/types.h
+++ b/lib/ffts/src/types.h
@@ -1,10 +1,10 @@
/*
-
+
This file is part of FFTS -- The Fastest Fourier Transform in the South
-
+
Copyright (c) 2012, Anthony M. Blake <amb@anthonix.com>
- Copyright (c) 2012, The University of Waikato
-
+ Copyright (c) 2012, The University of Waikato
+
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -31,19 +31,15 @@
*/
+#ifndef FFTS_TYPES_H
+#define FFTS_TYPES_H
-#ifndef __TYPES_H__
-#define __TYPES_H__
-
-#define __INLINE static inline __attribute__((always_inline))
-
-#if defined(complex)
- typedef complex float cdata_t;
-#else
- typedef float cdata_t[2];
-#endif
- typedef float data_t;
-
+#if defined (_MSC_VER) && (_MSC_VER >= 1020)
+#pragma once
#endif
+/* Define complex number as two element array */
+typedef float ffts_cpx_32f[2];
+typedef double ffts_cpx_64f[2];
+#endif /* FFTS_TYPES_H */
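+
+/* The header rewrite drops the old cdata_t, whose representation silently
+   changed depending on whether a 'complex' macro was visible to the includer,
+   and retires the __TYPES_H__ guard (identifiers with a leading double
+   underscore are reserved in C). A complex sample is now always a two-element
+   array, so an N-point buffer is interleaved re,im,re,im. A small
+   illustration of the resulting layout, assuming only the typedef itself:
+
+   #include <stdio.h>
+
+   typedef float ffts_cpx_32f[2];
+
+   int main(void)
+   {
+       ffts_cpx_32f x[4] = { {1.0f, 0.0f}, {0.0f, -1.0f}, {-1.0f, 0.0f}, {0.0f, 1.0f} };
+       int i;
+
+       for (i = 0; i < 4; i++)
+           printf("x[%d] = %f %+fi\n", i, x[i][0], x[i][1]);
+       return 0;
+   }
+*/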
diff --git a/lib/ffts/src/vfp.h b/lib/ffts/src/vfp.h
index f733a3f..8286646 100644
--- a/lib/ffts/src/vfp.h
+++ b/lib/ffts/src/vfp.h
@@ -43,3 +43,4 @@ void vfp_x8();
void vfp_end();
#endif
+// vim: set autoindent noexpandtab tabstop=3 shiftwidth=3:
diff --git a/lib/ffts/src/vfp.s b/lib/ffts/src/vfp.s
index 8ced89d..a60367d 100644
--- a/lib/ffts/src/vfp.s
+++ b/lib/ffts/src/vfp.s
@@ -30,7 +30,7 @@
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-
+ .fpu vfp
@ assumes r0 = out
@ r1 = in ?
@@ -41,7 +41,7 @@
@ r2 = const pointer
@ & lr = temps
- .align 4
+ .align 4
#ifdef __APPLE__
.globl _vfp_e
_vfp_e:
@@ -50,44 +50,44 @@ _vfp_e:
vfp_e:
#endif
_vfp_e_loop:
- vldr s15, [r2, #8]
- vldr s2, [r3] @ x0
- vldr s0, [r3, #4]
- vldr s4, [r4] @ x1
- vldr s11, [r2]
- vldr s10, [r7] @ x4
- vldr s3, [r7, #4]
- vldr s8, [r8] @ x5
- vldr s1, [r8, #4]
- vldr s14, [r9] @ x6
- vldr s9, [r9, #4]
- vldr s6, [r10] @ x7
- vldr s12, [r10, #4]
+ vldr s15, [r2, #8]
+ vldr s2, [r3] @ x0
+ vldr s0, [r3, #4]
+ vldr s4, [r4] @ x1
+ vldr s11, [r2]
+ vldr s10, [r7] @ x4
+ vldr s3, [r7, #4]
+ vldr s8, [r8] @ x5
+ vldr s1, [r8, #4]
+ vldr s14, [r9] @ x6
+ vldr s9, [r9, #4]
+ vldr s6, [r10] @ x7
+ vldr s12, [r10, #4]
vsub.f32 s18, s3, s1
vsub.f32 s7, s10, s8
vsub.f32 s5, s14, s6
vadd.f32 s6, s14, s6
- vldr s24, [r5, #4]
+ vldr s24, [r5, #4]
vsub.f32 s14, s9, s12
- vldr s22, [r6, #4]
+ vldr s22, [r6, #4]
vadd.f32 s8, s10, s8
- vldr s28, [r6] @ x3
- vldr s17, [r5] @ x2
+ vldr s28, [r6] @ x3
+ vldr s17, [r5] @ x2
vadd.f32 s10, s9, s12
vmul.f32 s13, s18, s15
vmul.f32 s9, s7, s11
vmul.f32 s16, s5, s11
vmul.f32 s18, s18, s11
vmul.f32 s30, s14, s11
- vldr s11, [r4, #4]
- add r3, r3, #8
- add r4, r4, #8
- add r5, r5, #8
- add r6, r6, #8
- add r7, r7, #8
- add r8, r8, #8
- add r9, r9, #8
- add r10, r10, #8
+ vldr s11, [r4, #4]
+ add r3, r3, #8
+ add r4, r4, #8
+ add r5, r5, #8
+ add r6, r6, #8
+ add r7, r7, #8
+ add r8, r8, #8
+ add r9, r9, #8
+ add r10, r10, #8
vmul.f32 s12, s5, s15
vmul.f32 s20, s14, s15
vadd.f32 s5, s2, s4
@@ -111,7 +111,7 @@ _vfp_e_loop:
vsub.f32 s12, s30, s12
vadd.f32 s20, s3, s10
vsub.f32 s15, s3, s10
- vsub.f32 s3, s26, s1
+ vsub.f32 s3, s26, s1
vadd.f32 s18, s9, s13
vadd.f32 s10, s14, s4
vadd.f32 s6, s2, s7 @
@@ -120,15 +120,15 @@ _vfp_e_loop:
vsub.f32 s4, s14, s4
vsub.f32 s8, s22, s16 @
vadd.f32 s1, s28, s12
-ldr lr, [r12], #4
-add lr, r0, lr, lsl #2
-subs r11, r11, #1
- vstr s18, [lr]
+ ldr lr, [r12], #4
+ add lr, r0, lr, lsl #2
+ subs r11, r11, #1
+ vstr s18, [lr]
vsub.f32 s2, s28, s12
vadd.f32 s12, s22, s16 @
vsub.f32 s16, s3, s24 @
vsub.f32 s13, s9, s13
- vstr s26, [lr, #4]
+ vstr s26, [lr, #4]
vadd.f32 s28, s5, s15 @
vsub.f32 s7, s5, s15 @
vadd.f32 s14, s6, s10
@@ -136,26 +136,26 @@ subs r11, r11, #1
vadd.f32 s9, s0, s2 @
vsub.f32 s2, s0, s2 @
vsub.f32 s11, s11, s20
- vstr s28, [lr, #16]
+ vstr s28, [lr, #16]
vadd.f32 s3, s3, s24 @
- vstr s16, [lr, #20]
+ vstr s16, [lr, #20]
vsub.f32 s6, s6, s10
- vstr s13, [lr, #32]
+ vstr s13, [lr, #32]
vsub.f32 s13, s12, s4 @
vsub.f32 s8, s8, s1
vadd.f32 s0, s12, s4 @
- vstr s11, [lr, #36]
- vstr s7, [lr, #48]
- vstr s3, [lr, #52]
- vstr s14, [lr, #8]
- vstr s5, [lr, #12]
- vstr s9, [lr, #24]
- vstr s13, [lr, #28]
- vstr s6, [lr, #40]
- vstr s8, [lr, #44]
- vstr s2, [lr, #56]
- vstr s0, [lr, #60]
- bne _vfp_e_loop
+ vstr s11, [lr, #36]
+ vstr s7, [lr, #48]
+ vstr s3, [lr, #52]
+ vstr s14, [lr, #8]
+ vstr s5, [lr, #12]
+ vstr s9, [lr, #24]
+ vstr s13, [lr, #28]
+ vstr s6, [lr, #40]
+ vstr s8, [lr, #44]
+ vstr s2, [lr, #56]
+ vstr s0, [lr, #60]
+ bne _vfp_e_loop
@ assumes r0 = out
@ r1 = in ?
@@ -461,7 +461,6 @@ _vfp_x8_loop:
bne _vfp_x8_loop
bx lr
-
.align 4
#ifdef __APPLE__
.globl _vfp_end
diff --git a/lib/ffts/tests/Makefile.in b/lib/ffts/tests/Makefile.in
index 1bdace5..280fae1 100644
--- a/lib/ffts/tests/Makefile.in
+++ b/lib/ffts/tests/Makefile.in
@@ -1,7 +1,7 @@
-# Makefile.in generated by automake 1.12.4 from Makefile.am.
+# Makefile.in generated by automake 1.14 from Makefile.am.
# @configure_input@
-# Copyright (C) 1994-2012 Free Software Foundation, Inc.
+# Copyright (C) 1994-2013 Free Software Foundation, Inc.
# This Makefile.in is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
@@ -15,23 +15,51 @@
@SET_MAKE@
VPATH = @srcdir@
-am__make_dryrun = \
- { \
- am__dry=no; \
+am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)'
+am__make_running_with_option = \
+ case $${target_option-} in \
+ ?) ;; \
+ *) echo "am__make_running_with_option: internal error: invalid" \
+ "target option '$${target_option-}' specified" >&2; \
+ exit 1;; \
+ esac; \
+ has_opt=no; \
+ sane_makeflags=$$MAKEFLAGS; \
+ if $(am__is_gnu_make); then \
+ sane_makeflags=$$MFLAGS; \
+ else \
case $$MAKEFLAGS in \
*\\[\ \ ]*) \
- echo 'am--echo: ; @echo "AM" OK' | $(MAKE) -f - 2>/dev/null \
- | grep '^AM OK$$' >/dev/null || am__dry=yes;; \
- *) \
- for am__flg in $$MAKEFLAGS; do \
- case $$am__flg in \
- *=*|--*) ;; \
- *n*) am__dry=yes; break;; \
- esac; \
- done;; \
+ bs=\\; \
+ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
+ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
+ esac; \
+ fi; \
+ skip_next=no; \
+ strip_trailopt () \
+ { \
+ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
+ }; \
+ for flg in $$sane_makeflags; do \
+ test $$skip_next = yes && { skip_next=no; continue; }; \
+ case $$flg in \
+ *=*|--*) continue;; \
+ -*I) strip_trailopt 'I'; skip_next=yes;; \
+ -*I?*) strip_trailopt 'I';; \
+ -*O) strip_trailopt 'O'; skip_next=yes;; \
+ -*O?*) strip_trailopt 'O';; \
+ -*l) strip_trailopt 'l'; skip_next=yes;; \
+ -*l?*) strip_trailopt 'l';; \
+ -[dEDm]) skip_next=yes;; \
+ -[JT]) skip_next=yes;; \
+ esac; \
+ case $$flg in \
+ *$$target_option*) has_opt=yes; break;; \
esac; \
- test $$am__dry = yes; \
- }
+ done; \
+ test $$has_opt = yes
+am__make_dryrun = (target_option=n; $(am__make_running_with_option))
+am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
pkgdatadir = $(datadir)/@PACKAGE@
pkgincludedir = $(includedir)/@PACKAGE@
pkglibdir = $(libdir)/@PACKAGE@
@@ -52,7 +80,7 @@ build_triplet = @build@
host_triplet = @host@
noinst_PROGRAMS = test$(EXEEXT)
subdir = tests
-DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in \
+DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \
$(top_srcdir)/depcomp
ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
am__aclocal_m4_deps = $(top_srcdir)/m4/ax_check_classpath.m4 \
@@ -73,19 +101,44 @@ PROGRAMS = $(noinst_PROGRAMS)
am_test_OBJECTS = test.$(OBJEXT)
test_OBJECTS = $(am_test_OBJECTS)
test_DEPENDENCIES = $(top_builddir)/src/libffts.la
+AM_V_lt = $(am__v_lt_@AM_V@)
+am__v_lt_ = $(am__v_lt_@AM_DEFAULT_V@)
+am__v_lt_0 = --silent
+am__v_lt_1 =
+AM_V_P = $(am__v_P_@AM_V@)
+am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
+am__v_P_0 = false
+am__v_P_1 = :
+AM_V_GEN = $(am__v_GEN_@AM_V@)
+am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
+am__v_GEN_0 = @echo " GEN " $@;
+am__v_GEN_1 =
+AM_V_at = $(am__v_at_@AM_V@)
+am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
+am__v_at_0 = @
+am__v_at_1 =
DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir)
depcomp = $(SHELL) $(top_srcdir)/depcomp
am__depfiles_maybe = depfiles
am__mv = mv -f
COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \
$(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS)
-LTCOMPILE = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \
- --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \
- $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS)
+LTCOMPILE = $(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) \
+ $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) \
+ $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) \
+ $(AM_CFLAGS) $(CFLAGS)
+AM_V_CC = $(am__v_CC_@AM_V@)
+am__v_CC_ = $(am__v_CC_@AM_DEFAULT_V@)
+am__v_CC_0 = @echo " CC " $@;
+am__v_CC_1 =
CCLD = $(CC)
-LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \
- --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) \
- $(LDFLAGS) -o $@
+LINK = $(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) \
+ $(LIBTOOLFLAGS) --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) \
+ $(AM_LDFLAGS) $(LDFLAGS) -o $@
+AM_V_CCLD = $(am__v_CCLD_@AM_V@)
+am__v_CCLD_ = $(am__v_CCLD_@AM_DEFAULT_V@)
+am__v_CCLD_0 = @echo " CCLD " $@;
+am__v_CCLD_1 =
SOURCES = $(test_SOURCES)
DIST_SOURCES = $(test_SOURCES)
am__can_run_installinfo = \
@@ -93,11 +146,29 @@ am__can_run_installinfo = \
n|no|NO) false;; \
*) (install-info --version) >/dev/null 2>&1;; \
esac
+am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
+# Read a list of newline-separated strings from the standard input,
+# and print each of them once, without duplicates. Input order is
+# *not* preserved.
+am__uniquify_input = $(AWK) '\
+ BEGIN { nonempty = 0; } \
+ { items[$$0] = 1; nonempty = 1; } \
+ END { if (nonempty) { for (i in items) print i; }; } \
+'
+# Make sure the list of sources is unique. This is necessary because,
+# e.g., the same source file might be shared among _SOURCES variables
+# for different programs/libraries.
+am__define_uniq_tagged_files = \
+ list='$(am__tagged_files)'; \
+ unique=`for i in $$list; do \
+ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+ done | $(am__uniquify_input)`
ETAGS = etags
CTAGS = ctags
DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
ACLOCAL = @ACLOCAL@
AMTAR = @AMTAR@
+AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
AR = @AR@
AUTOCONF = @AUTOCONF@
AUTOHEADER = @AUTOHEADER@
@@ -271,9 +342,10 @@ clean-noinstPROGRAMS:
list=`for p in $$list; do echo "$$p"; done | sed 's/$(EXEEXT)$$//'`; \
echo " rm -f" $$list; \
rm -f $$list
+
test$(EXEEXT): $(test_OBJECTS) $(test_DEPENDENCIES) $(EXTRA_test_DEPENDENCIES)
@rm -f test$(EXEEXT)
- $(LINK) $(test_OBJECTS) $(test_LDADD) $(LIBS)
+ $(AM_V_CCLD)$(LINK) $(test_OBJECTS) $(test_LDADD) $(LIBS)
mostlyclean-compile:
-rm -f *.$(OBJEXT)
@@ -284,25 +356,25 @@ distclean-compile:
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/test.Po@am__quote@
.c.o:
-@am__fastdepCC_TRUE@ $(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $<
-@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po
-@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
+@am__fastdepCC_TRUE@ $(AM_V_CC)$(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $<
+@am__fastdepCC_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ $(AM_V_CC)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCC_FALSE@ $(COMPILE) -c $<
+@am__fastdepCC_FALSE@ $(AM_V_CC@am__nodep@)$(COMPILE) -c -o $@ $<
.c.obj:
-@am__fastdepCC_TRUE@ $(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'`
-@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po
-@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
+@am__fastdepCC_TRUE@ $(AM_V_CC)$(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'`
+@am__fastdepCC_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ $(AM_V_CC)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCC_FALSE@ $(COMPILE) -c `$(CYGPATH_W) '$<'`
+@am__fastdepCC_FALSE@ $(AM_V_CC@am__nodep@)$(COMPILE) -c -o $@ `$(CYGPATH_W) '$<'`
.c.lo:
-@am__fastdepCC_TRUE@ $(LTCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $<
-@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo
-@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@
+@am__fastdepCC_TRUE@ $(AM_V_CC)$(LTCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $<
+@am__fastdepCC_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ $(AM_V_CC)source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@
@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCC_FALSE@ $(LTCOMPILE) -c -o $@ $<
+@am__fastdepCC_FALSE@ $(AM_V_CC@am__nodep@)$(LTCOMPILE) -c -o $@ $<
mostlyclean-libtool:
-rm -f *.lo
@@ -310,26 +382,15 @@ mostlyclean-libtool:
clean-libtool:
-rm -rf .libs _libs
-ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES)
- list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \
- unique=`for i in $$list; do \
- if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
- done | \
- $(AWK) '{ files[$$0] = 1; nonempty = 1; } \
- END { if (nonempty) { for (i in files) print i; }; }'`; \
- mkid -fID $$unique
-tags: TAGS
-
-TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \
- $(TAGS_FILES) $(LISP)
+ID: $(am__tagged_files)
+ $(am__define_uniq_tagged_files); mkid -fID $$unique
+tags: tags-am
+TAGS: tags
+
+tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files)
set x; \
here=`pwd`; \
- list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \
- unique=`for i in $$list; do \
- if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
- done | \
- $(AWK) '{ files[$$0] = 1; nonempty = 1; } \
- END { if (nonempty) { for (i in files) print i; }; }'`; \
+ $(am__define_uniq_tagged_files); \
shift; \
if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \
test -n "$$unique" || unique=$$empty_fix; \
@@ -341,15 +402,11 @@ TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \
$$unique; \
fi; \
fi
-ctags: CTAGS
-CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \
- $(TAGS_FILES) $(LISP)
- list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \
- unique=`for i in $$list; do \
- if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
- done | \
- $(AWK) '{ files[$$0] = 1; nonempty = 1; } \
- END { if (nonempty) { for (i in files) print i; }; }'`; \
+ctags: ctags-am
+
+CTAGS: ctags
+ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files)
+ $(am__define_uniq_tagged_files); \
test -z "$(CTAGS_ARGS)$$unique" \
|| $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \
$$unique
@@ -358,9 +415,10 @@ GTAGS:
here=`$(am__cd) $(top_builddir) && pwd` \
&& $(am__cd) $(top_srcdir) \
&& gtags -i $(GTAGS_ARGS) "$$here"
+cscopelist: cscopelist-am
-cscopelist: $(HEADERS) $(SOURCES) $(LISP)
- list='$(SOURCES) $(HEADERS) $(LISP)'; \
+cscopelist-am: $(am__tagged_files)
+ list='$(am__tagged_files)'; \
case "$(srcdir)" in \
[\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \
*) sdir=$(subdir)/$(srcdir) ;; \
@@ -513,18 +571,19 @@ uninstall-am:
.MAKE: install-am install-strip
-.PHONY: CTAGS GTAGS all all-am check check-am clean clean-generic \
- clean-libtool clean-noinstPROGRAMS cscopelist ctags distclean \
- distclean-compile distclean-generic distclean-libtool \
- distclean-tags distdir dvi dvi-am html html-am info info-am \
- install install-am install-data install-data-am install-dvi \
- install-dvi-am install-exec install-exec-am install-html \
- install-html-am install-info install-info-am install-man \
- install-pdf install-pdf-am install-ps install-ps-am \
- install-strip installcheck installcheck-am installdirs \
- maintainer-clean maintainer-clean-generic mostlyclean \
- mostlyclean-compile mostlyclean-generic mostlyclean-libtool \
- pdf pdf-am ps ps-am tags uninstall uninstall-am
+.PHONY: CTAGS GTAGS TAGS all all-am check check-am clean clean-generic \
+ clean-libtool clean-noinstPROGRAMS cscopelist-am ctags \
+ ctags-am distclean distclean-compile distclean-generic \
+ distclean-libtool distclean-tags distdir dvi dvi-am html \
+ html-am info info-am install install-am install-data \
+ install-data-am install-dvi install-dvi-am install-exec \
+ install-exec-am install-html install-html-am install-info \
+ install-info-am install-man install-pdf install-pdf-am \
+ install-ps install-ps-am install-strip installcheck \
+ installcheck-am installdirs maintainer-clean \
+ maintainer-clean-generic mostlyclean mostlyclean-compile \
+ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \
+ tags tags-am uninstall uninstall-am
# Tell versions [3.59,3.63) of GNU make to not export all variables.
diff --git a/lib/ffts/tests/test.c b/lib/ffts/tests/test.c
index 03f8f7a..d07f766 100644
--- a/lib/ffts/tests/test.c
+++ b/lib/ffts/tests/test.c
@@ -1,7 +1,7 @@
/*
-
- This file is part of SFFT.
-
+
+ This file is part of FFTS.
+
Copyright (c) 2012, Anthony M. Blake
All rights reserved.
@@ -29,148 +29,164 @@
*/
-#include <stdio.h>
-#include <math.h>
+#include "../include/ffts.h"
+#include "../src/ffts_attributes.h"
#ifdef __ARM_NEON__
#endif
-#ifdef HAVE_SSE
- #include <xmmintrin.h>
-#endif
-#include "../include/ffts.h"
+#ifdef HAVE_SSE
+#include <xmmintrin.h>
+#endif
+#include <stdio.h>
+#include <stdlib.h>
+#include <math.h>
-#define PI 3.1415926535897932384626433832795028841971693993751058209
+#ifndef M_PI
+#define M_PI 3.1415926535897932384626433832795028841971693993751058209
+#endif
-float impulse_error(int N, int sign, float *data) {
+static float impulse_error(int N, int sign, float *data)
+{
#ifdef __ANDROID__
- double delta_sum = 0.0f;
- double sum = 0.0f;
+ double delta_sum = 0.0f;
+ double sum = 0.0f;
#else
- long double delta_sum = 0.0f;
- long double sum = 0.0f;
-#endif
+ long double delta_sum = 0.0f;
+ long double sum = 0.0f;
+#endif
+ int i;
- int i;
- for(i=0;i<N;i++) {
+ for (i = 0; i < N; i++) {
#ifdef __ANDROID__
- double re, im;
- if(sign < 0) {
- re = cos(2 * PI * (double)i / (double)N);
- im = -sin(2 * PI * (double)i / (double)N);
- }else{
- re = cos(2 * PI * (double)i / (double)N);
- im = sin(2 * PI * (double)i / (double)N);
- }
+ double re, im;
+
+ if (sign < 0) {
+ re = cos(2 * M_PI * (double) i / (double) N);
+ im = -sin(2 * M_PI * (double) i / (double) N);
+ } else {
+ re = cos(2 * M_PI * (double) i / (double) N);
+ im = sin(2 * M_PI * (double) i / (double) N);
+ }
#else
- long double re, im;
- if(sign < 0) {
- re = cosl(2 * PI * (long double)i / (long double)N);
- im = -sinl(2 * PI * (long double)i / (long double)N);
- }else{
- re = cosl(2 * PI * (long double)i / (long double)N);
- im = sinl(2 * PI * (long double)i / (long double)N);
- }
+ long double re, im;
+
+ if (sign < 0) {
+ re = cosl(2 * M_PI * (long double) i / (long double) N);
+ im = -sinl(2 * M_PI * (long double) i / (long double) N);
+ } else {
+ re = cosl(2 * M_PI * (long double) i / (long double) N);
+ im = sinl(2 * M_PI * (long double) i / (long double) N);
+ }
#endif
- sum += re * re + im * im;
- re = re - data[2*i];
- im = im - data[2*i+1];
-
- delta_sum += re * re + im * im;
+ sum += re * re + im * im;
+
+ re = re - data[2*i];
+ im = im - data[2*i+1];
+
+ delta_sum += re * re + im * im;
+ }
- }
#ifdef __ANDROID__
- return sqrt(delta_sum) / sqrt(sum);
+ return (float) (sqrt(delta_sum) / sqrt(sum));
#else
- return sqrtl(delta_sum) / sqrtl(sum);
+ return (float) (sqrtl(delta_sum) / sqrtl(sum));
#endif
}
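+
+/* impulse_error relies on the test below feeding the transform an impulse at
+   complex index 1 (input[2] = 1.0f), whose exact N-point DFT is
+   X[k] = exp(-sign * 2*pi*i*k/N); it returns the relative L2 error against
+   that closed form, accumulated in long double except on Android, where the
+   long double math functions were historically unreliable. A standalone
+   check of the reference values for N = 4 (not FFTS code; link with -lm):
+
+   #include <math.h>
+   #include <stdio.h>
+
+   #ifndef M_PI
+   #define M_PI 3.14159265358979323846
+   #endif
+
+   int main(void)
+   {
+       const int N = 4;
+       int k, n;
+
+       // Forward DFT of an impulse at index 1; expect 1, -i, -1, i.
+       for (k = 0; k < N; k++) {
+           double re = 0.0, im = 0.0;
+           for (n = 0; n < N; n++) {
+               double x = (n == 1) ? 1.0 : 0.0;
+               re += x * cos(2.0 * M_PI * k * n / N);
+               im -= x * sin(2.0 * M_PI * k * n / N);
+           }
+           printf("X[%d] = %6.3f %+6.3fi\n", k, re, im);
+       }
+       return 0;
+   }
+*/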
-int
-test_transform(int n, int sign) {
+int test_transform(int n, int sign)
+{
+ ffts_plan_t *p;
-#ifdef HAVE_SSE
- float __attribute__ ((aligned(32))) *input = _mm_malloc(2 * n * sizeof(float), 32);
- float __attribute__ ((aligned(32))) *output = _mm_malloc(2 * n * sizeof(float), 32);
+#ifdef HAVE_SSE
+ float FFTS_ALIGN(32) *input = _mm_malloc(2 * n * sizeof(float), 32);
+ float FFTS_ALIGN(32) *output = _mm_malloc(2 * n * sizeof(float), 32);
#else
- float __attribute__ ((aligned(32))) *input = valloc(2 * n * sizeof(float));
- float __attribute__ ((aligned(32))) *output = valloc(2 * n * sizeof(float));
+ float FFTS_ALIGN(32) *input = valloc(2 * n * sizeof(float));
+ float FFTS_ALIGN(32) *output = valloc(2 * n * sizeof(float));
#endif
- int i;
- for(i=0;i<n;i++) {
- input[2*i] = 0.0f;
- input[2*i+1] = 0.0f;
- }
-
- input[2] = 1.0f;
-
- ffts_plan_t *p = ffts_init_1d(i, sign);
- if(p) {
- ffts_execute(p, input, output);
- printf(" %3d | %9d | %10E\n", sign, n, impulse_error(n, sign, output));
- ffts_free(p);
- }else{
- printf("Plan unsupported\n");
- return 0;
- }
-
- return 1;
+ int i;
+
+ for (i = 0; i < n; i++) {
+ input[2*i + 0] = 0.0f;
+ input[2*i + 1] = 0.0f;
+ }
+
+ input[2] = 1.0f;
+
+ p = ffts_init_1d(i, sign);
+ if (!p) {
+ printf("Plan unsupported\n");
+ return 0;
+ }
+
+ ffts_execute(p, input, output);
+ printf(" %3d | %9d | %10E\n", sign, n, impulse_error(n, sign, output));
+ ffts_free(p);
+ return 1;
}
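+
+/* The restructured test_transform shows the whole plan lifecycle: ffts_init_1d,
+   a NULL check (the early return replaces the old else branch), ffts_execute,
+   ffts_free. Note the size argument is i rather than n, carried over from the
+   old code; it works only because i equals n once the fill loop finishes. A
+   self-contained sketch of the same lifecycle, assuming the in-tree header
+   path and C11 aligned_alloc in place of the test's _mm_malloc/valloc
+   (2 * 16 * sizeof(float) = 128 bytes, a multiple of the 32-byte alignment
+   the test allocates for the SSE code paths):
+
+   #include <stdio.h>
+   #include <stdlib.h>
+
+   #include "../include/ffts.h"
+
+   int main(void)
+   {
+       const int n = 16;
+       float *in  = aligned_alloc(32, 2 * n * sizeof(float)); // interleaved re/im
+       float *out = aligned_alloc(32, 2 * n * sizeof(float));
+       ffts_plan_t *p;
+       int i;
+
+       if (!in || !out)
+           return 1;
+
+       for (i = 0; i < 2 * n; i++)
+           in[i] = 0.0f;
+       in[2] = 1.0f; // impulse at complex index 1, as in test_transform
+
+       p = ffts_init_1d(n, -1); // negative sign selects the forward transform
+       if (!p) {
+           printf("Plan unsupported\n");
+           return 1;
+       }
+
+       ffts_execute(p, in, out);
+       printf("X[1] = %f %+fi\n", out[2], out[3]);
+
+       ffts_free(p);
+       free(in);
+       free(out);
+       return 0;
+   }
+*/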
-int
-main(int argc, char *argv[]) {
-
- if(argc == 3) {
- // test specific transform with test pattern and display output
- int n = atoi(argv[1]);
- int sign = atoi(argv[2]);
+int main(int argc, char *argv[])
+{
+ if (argc == 3) {
+ ffts_plan_t *p;
+ int i;
+
+ /* test specific transform with test pattern and display output */
+ int n = atoi(argv[1]);
+ int sign = atoi(argv[2]);
#ifdef HAVE_SSE
- float __attribute__ ((aligned(32))) *input = _mm_malloc(2 * n * sizeof(float), 32);
- float __attribute__ ((aligned(32))) *output = _mm_malloc(2 * n * sizeof(float), 32);
+ float FFTS_ALIGN(32) *input = _mm_malloc(2 * n * sizeof(float), 32);
+ float FFTS_ALIGN(32) *output = _mm_malloc(2 * n * sizeof(float), 32);
#else
- float __attribute__ ((aligned(32))) *input = valloc(2 * n * sizeof(float));
- float __attribute__ ((aligned(32))) *output = valloc(2 * n * sizeof(float));
+ float FFTS_ALIGN(32) *input = valloc(2 * n * sizeof(float));
+ float FFTS_ALIGN(32) *output = valloc(2 * n * sizeof(float));
#endif
- int i;
- for(i=0;i<n;i++) {
- input[2*i] = i;
- input[2*i+1] = 0.0f;
- }
-
- // input[2] = 1.0f;
-
- ffts_plan_t *p = ffts_init_1d(i, sign);
- if(p) {
- ffts_execute(p, input, output);
- for(i=0;i<n;i++) printf("%d %d %f %f\n", i, sign, output[2*i], output[2*i+1]);
- ffts_free(p);
- }else{
- printf("Plan unsupported\n");
- return 0;
- }
-
-#ifdef HAVE_NEON
- _mm_free(input);
- _mm_free(output);
+
+ for (i = 0; i < n; i++) {
+ input[2*i + 0] = (float) i;
+ input[2*i + 1] = 0.0f;
+ }
+
+ /* input[2] = 1.0f; */
+
+ p = ffts_init_1d(i, sign);
+ if (!p) {
+ printf("Plan unsupported\n");
+ return 0;
+ }
+
+ ffts_execute(p, input, output);
+
+ for (i = 0; i < n; i++)
+ printf("%d %d %f %f\n", i, sign, output[2*i], output[2*i+1]);
+ ffts_free(p);
+
+#ifdef HAVE_SSE
+ _mm_free(input);
+ _mm_free(output);
#else
- free(input);
- free(output);
+ free(input);
+ free(output);
#endif
+ } else {
+ int n, power2;
+
+ /* test various sizes and display error */
+ printf(" Sign | Size | L2 Error\n");
+ printf("------+-----------+-------------\n");
+
+ for (n = 1, power2 = 2; n <= 18; n++, power2 <<= 1) {
+ test_transform(power2, -1);
+ }
+
+ for (n = 1, power2 = 2; n <= 18; n++, power2 <<= 1) {
+ test_transform(power2, 1);
+ }
+ }
- }else{
- // test various sizes and display error
- printf(" Sign | Size | L2 Error\n");
- printf("------+-----------+-------------\n");
- int n;
- for(n=1;n<=18;n++) {
- test_transform(pow(2,n), -1);
- }
- for(n=1;n<=18;n++) {
- test_transform(pow(2,n), 1);
- }
- }
- return 0;
+ return 0;
}
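+
+/* One last functional change in the driver: the size loop now computes powers
+   of two with an integer shift instead of pow(2, n), so the sizes 2 through
+   2^18 are exact and no floating-point result is truncated on the way into
+   the int parameter. Equivalent standalone loop:
+
+   #include <stdio.h>
+
+   int main(void)
+   {
+       int n, power2;
+
+       for (n = 1, power2 = 2; n <= 18; n++, power2 <<= 1)
+           printf("size %d\n", power2); // 2, 4, 8, ..., 262144
+       return 0;
+   }
+*/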